]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.1-4.0.4-201506021902.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.1-4.0.4-201506021902.patch
CommitLineData
16159020
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 74b6c6d..eac0e77 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -643,7 +644,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -666,7 +689,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -694,7 +717,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -725,7 +748,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 4d68ec8..9546b75 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1203,6 +1203,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+ grsec_sysfs_restrict= Format: 0 | 1
327+ Default: 1
328+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
329+
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333@@ -2300,6 +2307,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
336
337+ nopcid [X86-64]
338+ Disable PCID (Process-Context IDentifier) even if it
339+ is supported by the processor.
340+
341 nosmap [X86]
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344@@ -2601,6 +2612,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
347
348+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349+ virtualization environments that don't cope well with the
350+ expand down segment used by UDEREF on X86-32 or the frequent
351+ page table updates on X86-64.
352+
353+ pax_sanitize_slab=
354+ Format: { 0 | 1 | off | fast | full }
355+ Options '0' and '1' are only provided for backward
356+ compatibility, 'off' or 'fast' should be used instead.
357+ 0|off : disable slab object sanitization
358+ 1|fast: enable slab object sanitization excluding
359+ whitelisted slabs (default)
360+ full : sanitize all slabs, even the whitelisted ones
361+
362+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
363+
364+ pax_extra_latent_entropy
365+ Enable a very simple form of latent entropy extraction
366+ from the first 4GB of memory as the bootmem allocator
367+ passes the memory pages to the buddy allocator.
368+
369+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370+ when the processor supports PCID.
371+
372 pcbit= [HW,ISDN]
373
374 pcd. [PARIDE]
375diff --git a/Makefile b/Makefile
376index 3d16bcc..c31faf4 100644
377--- a/Makefile
378+++ b/Makefile
379@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
380 HOSTCC = gcc
381 HOSTCXX = g++
382 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
383-HOSTCXXFLAGS = -O2
384+HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
385+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
386+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
387
388 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
389 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
390@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
391 # Rules shared between *config targets and build targets
392
393 # Basic helpers built in scripts/
394-PHONY += scripts_basic
395-scripts_basic:
396+PHONY += scripts_basic gcc-plugins
397+scripts_basic: gcc-plugins
398 $(Q)$(MAKE) $(build)=scripts/basic
399 $(Q)rm -f .tmp_quiet_recordmcount
400
401@@ -622,6 +624,74 @@ endif
402 # Tell gcc to never replace conditional load with a non-conditional one
403 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
404
405+ifndef DISABLE_PAX_PLUGINS
406+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
408+else
409+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
410+endif
411+ifneq ($(PLUGINCC),)
412+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
413+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
414+endif
415+ifdef CONFIG_PAX_MEMORY_STACKLEAK
416+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
417+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
418+endif
419+ifdef CONFIG_KALLOCSTAT_PLUGIN
420+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
421+endif
422+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
424+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
425+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
426+endif
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
428+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
429+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
430+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
431+endif
432+endif
433+ifdef CONFIG_CHECKER_PLUGIN
434+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
435+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
436+endif
437+endif
438+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
439+ifdef CONFIG_PAX_SIZE_OVERFLOW
440+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
441+endif
442+ifdef CONFIG_PAX_LATENT_ENTROPY
443+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
444+endif
445+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
446+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
447+endif
448+INITIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/initify_plugin.so -DINITIFY_PLUGIN
449+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
450+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
451+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
452+GCC_PLUGINS_CFLAGS += $(INITIFY_PLUGIN_CFLAGS)
453+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
454+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
455+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
456+ifeq ($(KBUILD_EXTMOD),)
457+gcc-plugins:
458+ $(Q)$(MAKE) $(build)=tools/gcc
459+else
460+gcc-plugins: ;
461+endif
462+else
463+gcc-plugins:
464+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
465+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
466+else
467+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
468+endif
469+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
470+endif
471+endif
472+
473 ifdef CONFIG_READABLE_ASM
474 # Disable optimizations that make assembler listings hard to read.
475 # reorder blocks reorders the control in the function
476@@ -714,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
477 else
478 KBUILD_CFLAGS += -g
479 endif
480-KBUILD_AFLAGS += -Wa,-gdwarf-2
481+KBUILD_AFLAGS += -Wa,--gdwarf-2
482 endif
483 ifdef CONFIG_DEBUG_INFO_DWARF4
484 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
485@@ -884,7 +954,7 @@ export mod_sign_cmd
486
487
488 ifeq ($(KBUILD_EXTMOD),)
489-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
490+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
491
492 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
493 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
494@@ -934,6 +1004,8 @@ endif
495
496 # The actual objects are generated when descending,
497 # make sure no implicit rule kicks in
498+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
499+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
500 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
501
502 # Handle descending into subdirectories listed in $(vmlinux-dirs)
503@@ -943,7 +1015,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
504 # Error messages still appears in the original language
505
506 PHONY += $(vmlinux-dirs)
507-$(vmlinux-dirs): prepare scripts
508+$(vmlinux-dirs): gcc-plugins prepare scripts
509 $(Q)$(MAKE) $(build)=$@
510
511 define filechk_kernel.release
512@@ -986,10 +1058,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
513
514 archprepare: archheaders archscripts prepare1 scripts_basic
515
516+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
517+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
518 prepare0: archprepare FORCE
519 $(Q)$(MAKE) $(build)=.
520
521 # All the preparing..
522+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
523 prepare: prepare0
524
525 # Generate some files
526@@ -1103,6 +1178,8 @@ all: modules
527 # using awk while concatenating to the final file.
528
529 PHONY += modules
530+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
531+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
532 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
533 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
534 @$(kecho) ' Building modules, stage 2.';
535@@ -1118,7 +1195,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
536
537 # Target to prepare building external modules
538 PHONY += modules_prepare
539-modules_prepare: prepare scripts
540+modules_prepare: gcc-plugins prepare scripts
541
542 # Target to install modules
543 PHONY += modules_install
544@@ -1184,7 +1261,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
545 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
546 signing_key.priv signing_key.x509 x509.genkey \
547 extra_certificates signing_key.x509.keyid \
548- signing_key.x509.signer vmlinux-gdb.py
549+ signing_key.x509.signer vmlinux-gdb.py \
550+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
551+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
552+ tools/gcc/randomize_layout_seed.h
553
554 # clean - Delete most, but leave enough to build external modules
555 #
556@@ -1223,7 +1303,7 @@ distclean: mrproper
557 @find $(srctree) $(RCS_FIND_IGNORE) \
558 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
559 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
560- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
561+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
562 -type f -print | xargs rm -f
563
564
565@@ -1389,6 +1469,8 @@ PHONY += $(module-dirs) modules
566 $(module-dirs): crmodverdir $(objtree)/Module.symvers
567 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
568
569+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
570+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
571 modules: $(module-dirs)
572 @$(kecho) ' Building modules, stage 2.';
573 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
574@@ -1529,17 +1611,21 @@ else
575 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
576 endif
577
578-%.s: %.c prepare scripts FORCE
579+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
580+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
581+%.s: %.c gcc-plugins prepare scripts FORCE
582 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
583 %.i: %.c prepare scripts FORCE
584 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
585-%.o: %.c prepare scripts FORCE
586+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
587+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
588+%.o: %.c gcc-plugins prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590 %.lst: %.c prepare scripts FORCE
591 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
592-%.s: %.S prepare scripts FORCE
593+%.s: %.S gcc-plugins prepare scripts FORCE
594 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
595-%.o: %.S prepare scripts FORCE
596+%.o: %.S gcc-plugins prepare scripts FORCE
597 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
598 %.symtypes: %.c prepare scripts FORCE
599 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
600@@ -1551,11 +1637,15 @@ endif
601 $(build)=$(build-dir)
602 # Make sure the latest headers are built for Documentation
603 Documentation/: headers_install
604-%/: prepare scripts FORCE
605+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
606+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
607+%/: gcc-plugins prepare scripts FORCE
608 $(cmd_crmodverdir)
609 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
610 $(build)=$(build-dir)
611-%.ko: prepare scripts FORCE
612+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
613+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
614+%.ko: gcc-plugins prepare scripts FORCE
615 $(cmd_crmodverdir)
616 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
617 $(build)=$(build-dir) $(@:.ko=.o)
618diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
619index 8f8eafb..3405f46 100644
620--- a/arch/alpha/include/asm/atomic.h
621+++ b/arch/alpha/include/asm/atomic.h
622@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
623 #define atomic_dec(v) atomic_sub(1,(v))
624 #define atomic64_dec(v) atomic64_sub(1,(v))
625
626+#define atomic64_read_unchecked(v) atomic64_read(v)
627+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
628+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
629+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
630+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
631+#define atomic64_inc_unchecked(v) atomic64_inc(v)
632+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
633+#define atomic64_dec_unchecked(v) atomic64_dec(v)
634+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
635+
636 #endif /* _ALPHA_ATOMIC_H */
637diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
638index ad368a9..fbe0f25 100644
639--- a/arch/alpha/include/asm/cache.h
640+++ b/arch/alpha/include/asm/cache.h
641@@ -4,19 +4,19 @@
642 #ifndef __ARCH_ALPHA_CACHE_H
643 #define __ARCH_ALPHA_CACHE_H
644
645+#include <linux/const.h>
646
647 /* Bytes per L1 (data) cache line. */
648 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
649-# define L1_CACHE_BYTES 64
650 # define L1_CACHE_SHIFT 6
651 #else
652 /* Both EV4 and EV5 are write-through, read-allocate,
653 direct-mapped, physical.
654 */
655-# define L1_CACHE_BYTES 32
656 # define L1_CACHE_SHIFT 5
657 #endif
658
659+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
660 #define SMP_CACHE_BYTES L1_CACHE_BYTES
661
662 #endif
663diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
664index 968d999..d36b2df 100644
665--- a/arch/alpha/include/asm/elf.h
666+++ b/arch/alpha/include/asm/elf.h
667@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
668
669 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
670
671+#ifdef CONFIG_PAX_ASLR
672+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
673+
674+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
675+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
676+#endif
677+
678 /* $0 is set by ld.so to a pointer to a function which might be
679 registered using atexit. This provides a mean for the dynamic
680 linker to call DT_FINI functions for shared libraries that have
681diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
682index aab14a0..b4fa3e7 100644
683--- a/arch/alpha/include/asm/pgalloc.h
684+++ b/arch/alpha/include/asm/pgalloc.h
685@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
686 pgd_set(pgd, pmd);
687 }
688
689+static inline void
690+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
691+{
692+ pgd_populate(mm, pgd, pmd);
693+}
694+
695 extern pgd_t *pgd_alloc(struct mm_struct *mm);
696
697 static inline void
698diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
699index a9a1195..e9b8417 100644
700--- a/arch/alpha/include/asm/pgtable.h
701+++ b/arch/alpha/include/asm/pgtable.h
702@@ -101,6 +101,17 @@ struct vm_area_struct;
703 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
704 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
705 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
706+
707+#ifdef CONFIG_PAX_PAGEEXEC
708+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
709+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
710+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
711+#else
712+# define PAGE_SHARED_NOEXEC PAGE_SHARED
713+# define PAGE_COPY_NOEXEC PAGE_COPY
714+# define PAGE_READONLY_NOEXEC PAGE_READONLY
715+#endif
716+
717 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
718
719 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
720diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
721index 2fd00b7..cfd5069 100644
722--- a/arch/alpha/kernel/module.c
723+++ b/arch/alpha/kernel/module.c
724@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
725
726 /* The small sections were sorted to the end of the segment.
727 The following should definitely cover them. */
728- gp = (u64)me->module_core + me->core_size - 0x8000;
729+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
730 got = sechdrs[me->arch.gotsecindex].sh_addr;
731
732 for (i = 0; i < n; i++) {
733diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
734index e51f578..16c64a3 100644
735--- a/arch/alpha/kernel/osf_sys.c
736+++ b/arch/alpha/kernel/osf_sys.c
737@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
738 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
739
740 static unsigned long
741-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
742- unsigned long limit)
743+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
744+ unsigned long limit, unsigned long flags)
745 {
746 struct vm_unmapped_area_info info;
747+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
748
749 info.flags = 0;
750 info.length = len;
751@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
752 info.high_limit = limit;
753 info.align_mask = 0;
754 info.align_offset = 0;
755+ info.threadstack_offset = offset;
756 return vm_unmapped_area(&info);
757 }
758
759@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
760 merely specific addresses, but regions of memory -- perhaps
761 this feature should be incorporated into all ports? */
762
763+#ifdef CONFIG_PAX_RANDMMAP
764+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
765+#endif
766+
767 if (addr) {
768- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
769+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
770 if (addr != (unsigned long) -ENOMEM)
771 return addr;
772 }
773
774 /* Next, try allocating at TASK_UNMAPPED_BASE. */
775- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
776- len, limit);
777+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
778+
779 if (addr != (unsigned long) -ENOMEM)
780 return addr;
781
782 /* Finally, try allocating in low memory. */
783- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
784+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
785
786 return addr;
787 }
788diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
789index 9d0ac09..479a962 100644
790--- a/arch/alpha/mm/fault.c
791+++ b/arch/alpha/mm/fault.c
792@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
793 __reload_thread(pcb);
794 }
795
796+#ifdef CONFIG_PAX_PAGEEXEC
797+/*
798+ * PaX: decide what to do with offenders (regs->pc = fault address)
799+ *
800+ * returns 1 when task should be killed
801+ * 2 when patched PLT trampoline was detected
802+ * 3 when unpatched PLT trampoline was detected
803+ */
804+static int pax_handle_fetch_fault(struct pt_regs *regs)
805+{
806+
807+#ifdef CONFIG_PAX_EMUPLT
808+ int err;
809+
810+ do { /* PaX: patched PLT emulation #1 */
811+ unsigned int ldah, ldq, jmp;
812+
813+ err = get_user(ldah, (unsigned int *)regs->pc);
814+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
815+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
816+
817+ if (err)
818+ break;
819+
820+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
821+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
822+ jmp == 0x6BFB0000U)
823+ {
824+ unsigned long r27, addr;
825+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
826+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
827+
828+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
829+ err = get_user(r27, (unsigned long *)addr);
830+ if (err)
831+ break;
832+
833+ regs->r27 = r27;
834+ regs->pc = r27;
835+ return 2;
836+ }
837+ } while (0);
838+
839+ do { /* PaX: patched PLT emulation #2 */
840+ unsigned int ldah, lda, br;
841+
842+ err = get_user(ldah, (unsigned int *)regs->pc);
843+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
844+ err |= get_user(br, (unsigned int *)(regs->pc+8));
845+
846+ if (err)
847+ break;
848+
849+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
850+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
851+ (br & 0xFFE00000U) == 0xC3E00000U)
852+ {
853+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
854+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
855+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
856+
857+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
858+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
859+ return 2;
860+ }
861+ } while (0);
862+
863+ do { /* PaX: unpatched PLT emulation */
864+ unsigned int br;
865+
866+ err = get_user(br, (unsigned int *)regs->pc);
867+
868+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
869+ unsigned int br2, ldq, nop, jmp;
870+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
871+
872+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
873+ err = get_user(br2, (unsigned int *)addr);
874+ err |= get_user(ldq, (unsigned int *)(addr+4));
875+ err |= get_user(nop, (unsigned int *)(addr+8));
876+ err |= get_user(jmp, (unsigned int *)(addr+12));
877+ err |= get_user(resolver, (unsigned long *)(addr+16));
878+
879+ if (err)
880+ break;
881+
882+ if (br2 == 0xC3600000U &&
883+ ldq == 0xA77B000CU &&
884+ nop == 0x47FF041FU &&
885+ jmp == 0x6B7B0000U)
886+ {
887+ regs->r28 = regs->pc+4;
888+ regs->r27 = addr+16;
889+ regs->pc = resolver;
890+ return 3;
891+ }
892+ }
893+ } while (0);
894+#endif
895+
896+ return 1;
897+}
898+
899+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
900+{
901+ unsigned long i;
902+
903+ printk(KERN_ERR "PAX: bytes at PC: ");
904+ for (i = 0; i < 5; i++) {
905+ unsigned int c;
906+ if (get_user(c, (unsigned int *)pc+i))
907+ printk(KERN_CONT "???????? ");
908+ else
909+ printk(KERN_CONT "%08x ", c);
910+ }
911+ printk("\n");
912+}
913+#endif
914
915 /*
916 * This routine handles page faults. It determines the address,
917@@ -133,8 +251,29 @@ retry:
918 good_area:
919 si_code = SEGV_ACCERR;
920 if (cause < 0) {
921- if (!(vma->vm_flags & VM_EXEC))
922+ if (!(vma->vm_flags & VM_EXEC)) {
923+
924+#ifdef CONFIG_PAX_PAGEEXEC
925+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
926+ goto bad_area;
927+
928+ up_read(&mm->mmap_sem);
929+ switch (pax_handle_fetch_fault(regs)) {
930+
931+#ifdef CONFIG_PAX_EMUPLT
932+ case 2:
933+ case 3:
934+ return;
935+#endif
936+
937+ }
938+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
939+ do_group_exit(SIGKILL);
940+#else
941 goto bad_area;
942+#endif
943+
944+ }
945 } else if (!cause) {
946 /* Allow reads even for write-only mappings */
947 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
948diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
949index cf4c0c9..a87ecf5 100644
950--- a/arch/arm/Kconfig
951+++ b/arch/arm/Kconfig
952@@ -1735,7 +1735,7 @@ config ALIGNMENT_TRAP
953
954 config UACCESS_WITH_MEMCPY
955 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
956- depends on MMU
957+ depends on MMU && !PAX_MEMORY_UDEREF
958 default y if CPU_FEROCEON
959 help
960 Implement faster copy_to_user and clear_user methods for CPU
961@@ -1999,6 +1999,7 @@ config XIP_PHYS_ADDR
962 config KEXEC
963 bool "Kexec system call (EXPERIMENTAL)"
964 depends on (!SMP || PM_SLEEP_SMP)
965+ depends on !GRKERNSEC_KMEM
966 help
967 kexec is a system call that implements the ability to shutdown your
968 current kernel, and to start another kernel. It is like a reboot
969diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
970index e22c119..abe7041 100644
971--- a/arch/arm/include/asm/atomic.h
972+++ b/arch/arm/include/asm/atomic.h
973@@ -18,17 +18,41 @@
974 #include <asm/barrier.h>
975 #include <asm/cmpxchg.h>
976
977+#ifdef CONFIG_GENERIC_ATOMIC64
978+#include <asm-generic/atomic64.h>
979+#endif
980+
981 #define ATOMIC_INIT(i) { (i) }
982
983 #ifdef __KERNEL__
984
985+#ifdef CONFIG_THUMB2_KERNEL
986+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
987+#else
988+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
989+#endif
990+
991+#define _ASM_EXTABLE(from, to) \
992+" .pushsection __ex_table,\"a\"\n"\
993+" .align 3\n" \
994+" .long " #from ", " #to"\n" \
995+" .popsection"
996+
997 /*
998 * On ARM, ordinary assignment (str instruction) doesn't clear the local
999 * strex/ldrex monitor on some implementations. The reason we can use it for
1000 * atomic_set() is the clrex or dummy strex done on every exception return.
1001 */
1002 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1003+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1004+{
1005+ return ACCESS_ONCE(v->counter);
1006+}
1007 #define atomic_set(v,i) (((v)->counter) = (i))
1008+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1009+{
1010+ v->counter = i;
1011+}
1012
1013 #if __LINUX_ARM_ARCH__ >= 6
1014
1015@@ -38,26 +62,50 @@
1016 * to ensure that the update happens.
1017 */
1018
1019-#define ATOMIC_OP(op, c_op, asm_op) \
1020-static inline void atomic_##op(int i, atomic_t *v) \
1021+#ifdef CONFIG_PAX_REFCOUNT
1022+#define __OVERFLOW_POST \
1023+ " bvc 3f\n" \
1024+ "2: " REFCOUNT_TRAP_INSN "\n"\
1025+ "3:\n"
1026+#define __OVERFLOW_POST_RETURN \
1027+ " bvc 3f\n" \
1028+" mov %0, %1\n" \
1029+ "2: " REFCOUNT_TRAP_INSN "\n"\
1030+ "3:\n"
1031+#define __OVERFLOW_EXTABLE \
1032+ "4:\n" \
1033+ _ASM_EXTABLE(2b, 4b)
1034+#else
1035+#define __OVERFLOW_POST
1036+#define __OVERFLOW_POST_RETURN
1037+#define __OVERFLOW_EXTABLE
1038+#endif
1039+
1040+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1041+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1042 { \
1043 unsigned long tmp; \
1044 int result; \
1045 \
1046 prefetchw(&v->counter); \
1047- __asm__ __volatile__("@ atomic_" #op "\n" \
1048+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1049 "1: ldrex %0, [%3]\n" \
1050 " " #asm_op " %0, %0, %4\n" \
1051+ post_op \
1052 " strex %1, %0, [%3]\n" \
1053 " teq %1, #0\n" \
1054-" bne 1b" \
1055+" bne 1b\n" \
1056+ extable \
1057 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1058 : "r" (&v->counter), "Ir" (i) \
1059 : "cc"); \
1060 } \
1061
1062-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1063-static inline int atomic_##op##_return(int i, atomic_t *v) \
1064+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1065+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1066+
1067+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1068+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1069 { \
1070 unsigned long tmp; \
1071 int result; \
1072@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1073 smp_mb(); \
1074 prefetchw(&v->counter); \
1075 \
1076- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1077+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1078 "1: ldrex %0, [%3]\n" \
1079 " " #asm_op " %0, %0, %4\n" \
1080+ post_op \
1081 " strex %1, %0, [%3]\n" \
1082 " teq %1, #0\n" \
1083-" bne 1b" \
1084+" bne 1b\n" \
1085+ extable \
1086 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1087 : "r" (&v->counter), "Ir" (i) \
1088 : "cc"); \
1089@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1090 return result; \
1091 }
1092
1093+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1094+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1095+
1096 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1097 {
1098 int oldval;
1099@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1100 __asm__ __volatile__ ("@ atomic_add_unless\n"
1101 "1: ldrex %0, [%4]\n"
1102 " teq %0, %5\n"
1103-" beq 2f\n"
1104-" add %1, %0, %6\n"
1105+" beq 4f\n"
1106+" adds %1, %0, %6\n"
1107+
1108+#ifdef CONFIG_PAX_REFCOUNT
1109+" bvc 3f\n"
1110+"2: " REFCOUNT_TRAP_INSN "\n"
1111+"3:\n"
1112+#endif
1113+
1114 " strex %2, %1, [%4]\n"
1115 " teq %2, #0\n"
1116 " bne 1b\n"
1117-"2:"
1118+"4:"
1119+
1120+#ifdef CONFIG_PAX_REFCOUNT
1121+ _ASM_EXTABLE(2b, 4b)
1122+#endif
1123+
1124 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1125 : "r" (&v->counter), "r" (u), "r" (a)
1126 : "cc");
1127@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1128 return oldval;
1129 }
1130
1131+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1132+{
1133+ unsigned long oldval, res;
1134+
1135+ smp_mb();
1136+
1137+ do {
1138+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1139+ "ldrex %1, [%3]\n"
1140+ "mov %0, #0\n"
1141+ "teq %1, %4\n"
1142+ "strexeq %0, %5, [%3]\n"
1143+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1144+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1145+ : "cc");
1146+ } while (res);
1147+
1148+ smp_mb();
1149+
1150+ return oldval;
1151+}
1152+
1153 #else /* ARM_ARCH_6 */
1154
1155 #ifdef CONFIG_SMP
1156 #error SMP not supported on pre-ARMv6 CPUs
1157 #endif
1158
1159-#define ATOMIC_OP(op, c_op, asm_op) \
1160-static inline void atomic_##op(int i, atomic_t *v) \
1161+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1162+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1163 { \
1164 unsigned long flags; \
1165 \
1166@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1167 raw_local_irq_restore(flags); \
1168 } \
1169
1170-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1171-static inline int atomic_##op##_return(int i, atomic_t *v) \
1172+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1173+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1174+
1175+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1176+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1177 { \
1178 unsigned long flags; \
1179 int val; \
1180@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1181 return val; \
1182 }
1183
1184+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1185+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1186+
1187 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1188 {
1189 int ret;
1190@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1191 return ret;
1192 }
1193
1194+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1195+{
1196+ return atomic_cmpxchg((atomic_t *)v, old, new);
1197+}
1198+
1199 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1200 {
1201 int c, old;
1202@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1203
1204 #undef ATOMIC_OPS
1205 #undef ATOMIC_OP_RETURN
1206+#undef __ATOMIC_OP_RETURN
1207 #undef ATOMIC_OP
1208+#undef __ATOMIC_OP
1209
1210 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1211+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1212+{
1213+ return xchg(&v->counter, new);
1214+}
1215
1216 #define atomic_inc(v) atomic_add(1, v)
1217+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1218+{
1219+ atomic_add_unchecked(1, v);
1220+}
1221 #define atomic_dec(v) atomic_sub(1, v)
1222+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1223+{
1224+ atomic_sub_unchecked(1, v);
1225+}
1226
1227 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1228+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1229+{
1230+ return atomic_add_return_unchecked(1, v) == 0;
1231+}
1232 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1233 #define atomic_inc_return(v) (atomic_add_return(1, v))
1234+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1235+{
1236+ return atomic_add_return_unchecked(1, v);
1237+}
1238 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1239 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1240
1241@@ -216,6 +336,14 @@ typedef struct {
1242 long long counter;
1243 } atomic64_t;
1244
1245+#ifdef CONFIG_PAX_REFCOUNT
1246+typedef struct {
1247+ long long counter;
1248+} atomic64_unchecked_t;
1249+#else
1250+typedef atomic64_t atomic64_unchecked_t;
1251+#endif
1252+
1253 #define ATOMIC64_INIT(i) { (i) }
1254
1255 #ifdef CONFIG_ARM_LPAE
1256@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1257 return result;
1258 }
1259
1260+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1261+{
1262+ long long result;
1263+
1264+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1265+" ldrd %0, %H0, [%1]"
1266+ : "=&r" (result)
1267+ : "r" (&v->counter), "Qo" (v->counter)
1268+ );
1269+
1270+ return result;
1271+}
1272+
1273 static inline void atomic64_set(atomic64_t *v, long long i)
1274 {
1275 __asm__ __volatile__("@ atomic64_set\n"
1276@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1277 : "r" (&v->counter), "r" (i)
1278 );
1279 }
1280+
1281+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1282+{
1283+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1284+" strd %2, %H2, [%1]"
1285+ : "=Qo" (v->counter)
1286+ : "r" (&v->counter), "r" (i)
1287+ );
1288+}
1289 #else
1290 static inline long long atomic64_read(const atomic64_t *v)
1291 {
1292@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1293 return result;
1294 }
1295
1296+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1297+{
1298+ long long result;
1299+
1300+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1301+" ldrexd %0, %H0, [%1]"
1302+ : "=&r" (result)
1303+ : "r" (&v->counter), "Qo" (v->counter)
1304+ );
1305+
1306+ return result;
1307+}
1308+
1309 static inline void atomic64_set(atomic64_t *v, long long i)
1310 {
1311 long long tmp;
1312@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1313 : "r" (&v->counter), "r" (i)
1314 : "cc");
1315 }
1316+
1317+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1318+{
1319+ long long tmp;
1320+
1321+ prefetchw(&v->counter);
1322+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1323+"1: ldrexd %0, %H0, [%2]\n"
1324+" strexd %0, %3, %H3, [%2]\n"
1325+" teq %0, #0\n"
1326+" bne 1b"
1327+ : "=&r" (tmp), "=Qo" (v->counter)
1328+ : "r" (&v->counter), "r" (i)
1329+ : "cc");
1330+}
1331 #endif
1332
1333-#define ATOMIC64_OP(op, op1, op2) \
1334-static inline void atomic64_##op(long long i, atomic64_t *v) \
1335+#undef __OVERFLOW_POST_RETURN
1336+#define __OVERFLOW_POST_RETURN \
1337+ " bvc 3f\n" \
1338+" mov %0, %1\n" \
1339+" mov %H0, %H1\n" \
1340+ "2: " REFCOUNT_TRAP_INSN "\n"\
1341+ "3:\n"
1342+
1343+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1344+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1345 { \
1346 long long result; \
1347 unsigned long tmp; \
1348 \
1349 prefetchw(&v->counter); \
1350- __asm__ __volatile__("@ atomic64_" #op "\n" \
1351+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1352 "1: ldrexd %0, %H0, [%3]\n" \
1353 " " #op1 " %Q0, %Q0, %Q4\n" \
1354 " " #op2 " %R0, %R0, %R4\n" \
1355+ post_op \
1356 " strexd %1, %0, %H0, [%3]\n" \
1357 " teq %1, #0\n" \
1358-" bne 1b" \
1359+" bne 1b\n" \
1360+ extable \
1361 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1362 : "r" (&v->counter), "r" (i) \
1363 : "cc"); \
1364 } \
1365
1366-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1367-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1368+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1369+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1370+
1371+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1372+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1373 { \
1374 long long result; \
1375 unsigned long tmp; \
1376@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1377 smp_mb(); \
1378 prefetchw(&v->counter); \
1379 \
1380- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1381+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1382 "1: ldrexd %0, %H0, [%3]\n" \
1383 " " #op1 " %Q0, %Q0, %Q4\n" \
1384 " " #op2 " %R0, %R0, %R4\n" \
1385+ post_op \
1386 " strexd %1, %0, %H0, [%3]\n" \
1387 " teq %1, #0\n" \
1388-" bne 1b" \
1389+" bne 1b\n" \
1390+ extable \
1391 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1392 : "r" (&v->counter), "r" (i) \
1393 : "cc"); \
1394@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1395 return result; \
1396 }
1397
1398+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1399+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1400+
1401 #define ATOMIC64_OPS(op, op1, op2) \
1402 ATOMIC64_OP(op, op1, op2) \
1403 ATOMIC64_OP_RETURN(op, op1, op2)
1404@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1405
1406 #undef ATOMIC64_OPS
1407 #undef ATOMIC64_OP_RETURN
1408+#undef __ATOMIC64_OP_RETURN
1409 #undef ATOMIC64_OP
1410+#undef __ATOMIC64_OP
1411+#undef __OVERFLOW_EXTABLE
1412+#undef __OVERFLOW_POST_RETURN
1413+#undef __OVERFLOW_POST
1414
1415 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1416 long long new)
1417@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1418 return oldval;
1419 }
1420
1421+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1422+ long long new)
1423+{
1424+ long long oldval;
1425+ unsigned long res;
1426+
1427+ smp_mb();
1428+
1429+ do {
1430+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1431+ "ldrexd %1, %H1, [%3]\n"
1432+ "mov %0, #0\n"
1433+ "teq %1, %4\n"
1434+ "teqeq %H1, %H4\n"
1435+ "strexdeq %0, %5, %H5, [%3]"
1436+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1437+ : "r" (&ptr->counter), "r" (old), "r" (new)
1438+ : "cc");
1439+ } while (res);
1440+
1441+ smp_mb();
1442+
1443+ return oldval;
1444+}
1445+
1446 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1447 {
1448 long long result;
1449@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1450 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1451 {
1452 long long result;
1453- unsigned long tmp;
1454+ u64 tmp;
1455
1456 smp_mb();
1457 prefetchw(&v->counter);
1458
1459 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1460-"1: ldrexd %0, %H0, [%3]\n"
1461-" subs %Q0, %Q0, #1\n"
1462-" sbc %R0, %R0, #0\n"
1463+"1: ldrexd %1, %H1, [%3]\n"
1464+" subs %Q0, %Q1, #1\n"
1465+" sbcs %R0, %R1, #0\n"
1466+
1467+#ifdef CONFIG_PAX_REFCOUNT
1468+" bvc 3f\n"
1469+" mov %Q0, %Q1\n"
1470+" mov %R0, %R1\n"
1471+"2: " REFCOUNT_TRAP_INSN "\n"
1472+"3:\n"
1473+#endif
1474+
1475 " teq %R0, #0\n"
1476-" bmi 2f\n"
1477+" bmi 4f\n"
1478 " strexd %1, %0, %H0, [%3]\n"
1479 " teq %1, #0\n"
1480 " bne 1b\n"
1481-"2:"
1482+"4:\n"
1483+
1484+#ifdef CONFIG_PAX_REFCOUNT
1485+ _ASM_EXTABLE(2b, 4b)
1486+#endif
1487+
1488 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1489 : "r" (&v->counter)
1490 : "cc");
1491@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1492 " teq %0, %5\n"
1493 " teqeq %H0, %H5\n"
1494 " moveq %1, #0\n"
1495-" beq 2f\n"
1496+" beq 4f\n"
1497 " adds %Q0, %Q0, %Q6\n"
1498-" adc %R0, %R0, %R6\n"
1499+" adcs %R0, %R0, %R6\n"
1500+
1501+#ifdef CONFIG_PAX_REFCOUNT
1502+" bvc 3f\n"
1503+"2: " REFCOUNT_TRAP_INSN "\n"
1504+"3:\n"
1505+#endif
1506+
1507 " strexd %2, %0, %H0, [%4]\n"
1508 " teq %2, #0\n"
1509 " bne 1b\n"
1510-"2:"
1511+"4:\n"
1512+
1513+#ifdef CONFIG_PAX_REFCOUNT
1514+ _ASM_EXTABLE(2b, 4b)
1515+#endif
1516+
1517 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1518 : "r" (&v->counter), "r" (u), "r" (a)
1519 : "cc");
1520@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1521
1522 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1523 #define atomic64_inc(v) atomic64_add(1LL, (v))
1524+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1525 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1526+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1527 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1528 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1529 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1530+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1531 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1532 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1533 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1534diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1535index d2f81e6..3c4dba5 100644
1536--- a/arch/arm/include/asm/barrier.h
1537+++ b/arch/arm/include/asm/barrier.h
1538@@ -67,7 +67,7 @@
1539 do { \
1540 compiletime_assert_atomic_type(*p); \
1541 smp_mb(); \
1542- ACCESS_ONCE(*p) = (v); \
1543+ ACCESS_ONCE_RW(*p) = (v); \
1544 } while (0)
1545
1546 #define smp_load_acquire(p) \
1547diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1548index 75fe66b..ba3dee4 100644
1549--- a/arch/arm/include/asm/cache.h
1550+++ b/arch/arm/include/asm/cache.h
1551@@ -4,8 +4,10 @@
1552 #ifndef __ASMARM_CACHE_H
1553 #define __ASMARM_CACHE_H
1554
1555+#include <linux/const.h>
1556+
1557 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1558-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1559+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1560
1561 /*
1562 * Memory returned by kmalloc() may be used for DMA, so we must make
1563@@ -24,5 +26,6 @@
1564 #endif
1565
1566 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1567+#define __read_only __attribute__ ((__section__(".data..read_only")))
1568
1569 #endif
1570diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1571index 2d46862..a35415b 100644
1572--- a/arch/arm/include/asm/cacheflush.h
1573+++ b/arch/arm/include/asm/cacheflush.h
1574@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1575 void (*dma_unmap_area)(const void *, size_t, int);
1576
1577 void (*dma_flush_range)(const void *, const void *);
1578-};
1579+} __no_const;
1580
1581 /*
1582 * Select the calling method
1583diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1584index 5233151..87a71fa 100644
1585--- a/arch/arm/include/asm/checksum.h
1586+++ b/arch/arm/include/asm/checksum.h
1587@@ -37,7 +37,19 @@ __wsum
1588 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1589
1590 __wsum
1591-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1592+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1593+
1594+static inline __wsum
1595+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1596+{
1597+ __wsum ret;
1598+ pax_open_userland();
1599+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1600+ pax_close_userland();
1601+ return ret;
1602+}
1603+
1604+
1605
1606 /*
1607 * Fold a partial checksum without adding pseudo headers
1608diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1609index abb2c37..96db950 100644
1610--- a/arch/arm/include/asm/cmpxchg.h
1611+++ b/arch/arm/include/asm/cmpxchg.h
1612@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1613
1614 #define xchg(ptr,x) \
1615 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1616+#define xchg_unchecked(ptr,x) \
1617+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1618
1619 #include <asm-generic/cmpxchg-local.h>
1620
1621diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1622index 6ddbe44..b5e38b1a 100644
1623--- a/arch/arm/include/asm/domain.h
1624+++ b/arch/arm/include/asm/domain.h
1625@@ -48,18 +48,37 @@
1626 * Domain types
1627 */
1628 #define DOMAIN_NOACCESS 0
1629-#define DOMAIN_CLIENT 1
1630 #ifdef CONFIG_CPU_USE_DOMAINS
1631+#define DOMAIN_USERCLIENT 1
1632+#define DOMAIN_KERNELCLIENT 1
1633 #define DOMAIN_MANAGER 3
1634+#define DOMAIN_VECTORS DOMAIN_USER
1635 #else
1636+
1637+#ifdef CONFIG_PAX_KERNEXEC
1638 #define DOMAIN_MANAGER 1
1639+#define DOMAIN_KERNEXEC 3
1640+#else
1641+#define DOMAIN_MANAGER 1
1642+#endif
1643+
1644+#ifdef CONFIG_PAX_MEMORY_UDEREF
1645+#define DOMAIN_USERCLIENT 0
1646+#define DOMAIN_UDEREF 1
1647+#define DOMAIN_VECTORS DOMAIN_KERNEL
1648+#else
1649+#define DOMAIN_USERCLIENT 1
1650+#define DOMAIN_VECTORS DOMAIN_USER
1651+#endif
1652+#define DOMAIN_KERNELCLIENT 1
1653+
1654 #endif
1655
1656 #define domain_val(dom,type) ((type) << (2*(dom)))
1657
1658 #ifndef __ASSEMBLY__
1659
1660-#ifdef CONFIG_CPU_USE_DOMAINS
1661+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1662 static inline void set_domain(unsigned val)
1663 {
1664 asm volatile(
1665@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1666 isb();
1667 }
1668
1669-#define modify_domain(dom,type) \
1670- do { \
1671- struct thread_info *thread = current_thread_info(); \
1672- unsigned int domain = thread->cpu_domain; \
1673- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1674- thread->cpu_domain = domain | domain_val(dom, type); \
1675- set_domain(thread->cpu_domain); \
1676- } while (0)
1677-
1678+extern void modify_domain(unsigned int dom, unsigned int type);
1679 #else
1680 static inline void set_domain(unsigned val) { }
1681 static inline void modify_domain(unsigned dom, unsigned type) { }
1682diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1683index 674d03f..9a0bac0 100644
1684--- a/arch/arm/include/asm/elf.h
1685+++ b/arch/arm/include/asm/elf.h
1686@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1687 the loader. We need to make sure that it is out of the way of the program
1688 that it will "exec", and that there is sufficient room for the brk. */
1689
1690-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1691+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1692+
1693+#ifdef CONFIG_PAX_ASLR
1694+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1695+
1696+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1697+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1698+#endif
1699
1700 /* When the program starts, a1 contains a pointer to a function to be
1701 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1702@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1703 extern void elf_set_personality(const struct elf32_hdr *);
1704 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1705
1706-struct mm_struct;
1707-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1708-#define arch_randomize_brk arch_randomize_brk
1709-
1710 #ifdef CONFIG_MMU
1711 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1712 struct linux_binprm;
1713diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1714index de53547..52b9a28 100644
1715--- a/arch/arm/include/asm/fncpy.h
1716+++ b/arch/arm/include/asm/fncpy.h
1717@@ -81,7 +81,9 @@
1718 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1719 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1720 \
1721+ pax_open_kernel(); \
1722 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1723+ pax_close_kernel(); \
1724 flush_icache_range((unsigned long)(dest_buf), \
1725 (unsigned long)(dest_buf) + (size)); \
1726 \
1727diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1728index 53e69da..3fdc896 100644
1729--- a/arch/arm/include/asm/futex.h
1730+++ b/arch/arm/include/asm/futex.h
1731@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1732 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1733 return -EFAULT;
1734
1735+ pax_open_userland();
1736+
1737 smp_mb();
1738 /* Prefetching cannot fault */
1739 prefetchw(uaddr);
1740@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1741 : "cc", "memory");
1742 smp_mb();
1743
1744+ pax_close_userland();
1745+
1746 *uval = val;
1747 return ret;
1748 }
1749@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1750 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1751 return -EFAULT;
1752
1753+ pax_open_userland();
1754+
1755 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1756 "1: " TUSER(ldr) " %1, [%4]\n"
1757 " teq %1, %2\n"
1758@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1759 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1760 : "cc", "memory");
1761
1762+ pax_close_userland();
1763+
1764 *uval = val;
1765 return ret;
1766 }
1767@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1768 return -EFAULT;
1769
1770 pagefault_disable(); /* implies preempt_disable() */
1771+ pax_open_userland();
1772
1773 switch (op) {
1774 case FUTEX_OP_SET:
1775@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1776 ret = -ENOSYS;
1777 }
1778
1779+ pax_close_userland();
1780 pagefault_enable(); /* subsumes preempt_enable() */
1781
1782 if (!ret) {
1783diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1784index 83eb2f7..ed77159 100644
1785--- a/arch/arm/include/asm/kmap_types.h
1786+++ b/arch/arm/include/asm/kmap_types.h
1787@@ -4,6 +4,6 @@
1788 /*
1789 * This is the "bare minimum". AIO seems to require this.
1790 */
1791-#define KM_TYPE_NR 16
1792+#define KM_TYPE_NR 17
1793
1794 #endif
1795diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1796index 9e614a1..3302cca 100644
1797--- a/arch/arm/include/asm/mach/dma.h
1798+++ b/arch/arm/include/asm/mach/dma.h
1799@@ -22,7 +22,7 @@ struct dma_ops {
1800 int (*residue)(unsigned int, dma_t *); /* optional */
1801 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1802 const char *type;
1803-};
1804+} __do_const;
1805
1806 struct dma_struct {
1807 void *addr; /* single DMA address */
1808diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1809index f98c7f3..e5c626d 100644
1810--- a/arch/arm/include/asm/mach/map.h
1811+++ b/arch/arm/include/asm/mach/map.h
1812@@ -23,17 +23,19 @@ struct map_desc {
1813
1814 /* types 0-3 are defined in asm/io.h */
1815 enum {
1816- MT_UNCACHED = 4,
1817- MT_CACHECLEAN,
1818- MT_MINICLEAN,
1819+ MT_UNCACHED_RW = 4,
1820+ MT_CACHECLEAN_RO,
1821+ MT_MINICLEAN_RO,
1822 MT_LOW_VECTORS,
1823 MT_HIGH_VECTORS,
1824- MT_MEMORY_RWX,
1825+ __MT_MEMORY_RWX,
1826 MT_MEMORY_RW,
1827- MT_ROM,
1828- MT_MEMORY_RWX_NONCACHED,
1829+ MT_MEMORY_RX,
1830+ MT_ROM_RX,
1831+ MT_MEMORY_RW_NONCACHED,
1832+ MT_MEMORY_RX_NONCACHED,
1833 MT_MEMORY_RW_DTCM,
1834- MT_MEMORY_RWX_ITCM,
1835+ MT_MEMORY_RX_ITCM,
1836 MT_MEMORY_RW_SO,
1837 MT_MEMORY_DMA_READY,
1838 };
1839diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1840index 563b92f..689d58e 100644
1841--- a/arch/arm/include/asm/outercache.h
1842+++ b/arch/arm/include/asm/outercache.h
1843@@ -39,7 +39,7 @@ struct outer_cache_fns {
1844 /* This is an ARM L2C thing */
1845 void (*write_sec)(unsigned long, unsigned);
1846 void (*configure)(const struct l2x0_regs *);
1847-};
1848+} __no_const;
1849
1850 extern struct outer_cache_fns outer_cache;
1851
1852diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1853index 4355f0e..cd9168e 100644
1854--- a/arch/arm/include/asm/page.h
1855+++ b/arch/arm/include/asm/page.h
1856@@ -23,6 +23,7 @@
1857
1858 #else
1859
1860+#include <linux/compiler.h>
1861 #include <asm/glue.h>
1862
1863 /*
1864@@ -114,7 +115,7 @@ struct cpu_user_fns {
1865 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1866 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1867 unsigned long vaddr, struct vm_area_struct *vma);
1868-};
1869+} __no_const;
1870
1871 #ifdef MULTI_USER
1872 extern struct cpu_user_fns cpu_user;
1873diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1874index 19cfab5..3f5c7e9 100644
1875--- a/arch/arm/include/asm/pgalloc.h
1876+++ b/arch/arm/include/asm/pgalloc.h
1877@@ -17,6 +17,7 @@
1878 #include <asm/processor.h>
1879 #include <asm/cacheflush.h>
1880 #include <asm/tlbflush.h>
1881+#include <asm/system_info.h>
1882
1883 #define check_pgt_cache() do { } while (0)
1884
1885@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1886 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1887 }
1888
1889+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1890+{
1891+ pud_populate(mm, pud, pmd);
1892+}
1893+
1894 #else /* !CONFIG_ARM_LPAE */
1895
1896 /*
1897@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1898 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1899 #define pmd_free(mm, pmd) do { } while (0)
1900 #define pud_populate(mm,pmd,pte) BUG()
1901+#define pud_populate_kernel(mm,pmd,pte) BUG()
1902
1903 #endif /* CONFIG_ARM_LPAE */
1904
1905@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1906 __free_page(pte);
1907 }
1908
1909+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1910+{
1911+#ifdef CONFIG_ARM_LPAE
1912+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1913+#else
1914+ if (addr & SECTION_SIZE)
1915+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1916+ else
1917+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1918+#endif
1919+ flush_pmd_entry(pmdp);
1920+}
1921+
1922 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1923 pmdval_t prot)
1924 {
1925diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1926index 5e68278..1869bae 100644
1927--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1928+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1929@@ -27,7 +27,7 @@
1930 /*
1931 * - section
1932 */
1933-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1934+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1935 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1936 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1937 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1938@@ -39,6 +39,7 @@
1939 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1940 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1941 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1942+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1943
1944 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1945 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1946@@ -68,6 +69,7 @@
1947 * - extended small page/tiny page
1948 */
1949 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1950+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1951 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1952 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1953 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1954diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1955index bfd662e..f6cbb02 100644
1956--- a/arch/arm/include/asm/pgtable-2level.h
1957+++ b/arch/arm/include/asm/pgtable-2level.h
1958@@ -127,6 +127,9 @@
1959 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1960 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1961
1962+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1963+#define L_PTE_PXN (_AT(pteval_t, 0))
1964+
1965 /*
1966 * These are the memory types, defined to be compatible with
1967 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1968diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1969index a745a2a..481350a 100644
1970--- a/arch/arm/include/asm/pgtable-3level.h
1971+++ b/arch/arm/include/asm/pgtable-3level.h
1972@@ -80,6 +80,7 @@
1973 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1974 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1975 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1976+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1977 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1978 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1979 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1980@@ -91,10 +92,12 @@
1981 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1982 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1983 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1984+#define PMD_SECT_RDONLY PMD_SECT_AP2
1985
1986 /*
1987 * To be used in assembly code with the upper page attributes.
1988 */
1989+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1990 #define L_PTE_XN_HIGH (1 << (54 - 32))
1991 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1992
1993diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1994index f403541..b10df68 100644
1995--- a/arch/arm/include/asm/pgtable.h
1996+++ b/arch/arm/include/asm/pgtable.h
1997@@ -33,6 +33,9 @@
1998 #include <asm/pgtable-2level.h>
1999 #endif
2000
2001+#define ktla_ktva(addr) (addr)
2002+#define ktva_ktla(addr) (addr)
2003+
2004 /*
2005 * Just any arbitrary offset to the start of the vmalloc VM area: the
2006 * current 8MB value just means that there will be a 8MB "hole" after the
2007@@ -48,6 +51,9 @@
2008 #define LIBRARY_TEXT_START 0x0c000000
2009
2010 #ifndef __ASSEMBLY__
2011+extern pteval_t __supported_pte_mask;
2012+extern pmdval_t __supported_pmd_mask;
2013+
2014 extern void __pte_error(const char *file, int line, pte_t);
2015 extern void __pmd_error(const char *file, int line, pmd_t);
2016 extern void __pgd_error(const char *file, int line, pgd_t);
2017@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2018 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2019 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2020
2021+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2022+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2023+
2024+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2025+#include <asm/domain.h>
2026+#include <linux/thread_info.h>
2027+#include <linux/preempt.h>
2028+
2029+static inline int test_domain(int domain, int domaintype)
2030+{
2031+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2032+}
2033+#endif
2034+
2035+#ifdef CONFIG_PAX_KERNEXEC
2036+static inline unsigned long pax_open_kernel(void) {
2037+#ifdef CONFIG_ARM_LPAE
2038+ /* TODO */
2039+#else
2040+ preempt_disable();
2041+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2042+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2043+#endif
2044+ return 0;
2045+}
2046+
2047+static inline unsigned long pax_close_kernel(void) {
2048+#ifdef CONFIG_ARM_LPAE
2049+ /* TODO */
2050+#else
2051+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2052+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2053+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2054+ preempt_enable_no_resched();
2055+#endif
2056+ return 0;
2057+}
2058+#else
2059+static inline unsigned long pax_open_kernel(void) { return 0; }
2060+static inline unsigned long pax_close_kernel(void) { return 0; }
2061+#endif
2062+
2063 /*
2064 * This is the lowest virtual address we can permit any user space
2065 * mapping to be mapped at. This is particularly important for
2066@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2067 /*
2068 * The pgprot_* and protection_map entries will be fixed up in runtime
2069 * to include the cachable and bufferable bits based on memory policy,
2070- * as well as any architecture dependent bits like global/ASID and SMP
2071- * shared mapping bits.
2072+ * as well as any architecture dependent bits like global/ASID, PXN,
2073+ * and SMP shared mapping bits.
2074 */
2075 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2076
2077@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2078 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2079 {
2080 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2081- L_PTE_NONE | L_PTE_VALID;
2082+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2083 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2084 return pte;
2085 }
2086diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2087index c25ef3e..735f14b 100644
2088--- a/arch/arm/include/asm/psci.h
2089+++ b/arch/arm/include/asm/psci.h
2090@@ -32,7 +32,7 @@ struct psci_operations {
2091 int (*affinity_info)(unsigned long target_affinity,
2092 unsigned long lowest_affinity_level);
2093 int (*migrate_info_type)(void);
2094-};
2095+} __no_const;
2096
2097 extern struct psci_operations psci_ops;
2098 extern struct smp_operations psci_smp_ops;
2099diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2100index 18f5a55..5072a40 100644
2101--- a/arch/arm/include/asm/smp.h
2102+++ b/arch/arm/include/asm/smp.h
2103@@ -107,7 +107,7 @@ struct smp_operations {
2104 int (*cpu_disable)(unsigned int cpu);
2105 #endif
2106 #endif
2107-};
2108+} __no_const;
2109
2110 struct of_cpu_method {
2111 const char *method;
2112diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2113index 72812a1..335f4f3 100644
2114--- a/arch/arm/include/asm/thread_info.h
2115+++ b/arch/arm/include/asm/thread_info.h
2116@@ -77,9 +77,9 @@ struct thread_info {
2117 .flags = 0, \
2118 .preempt_count = INIT_PREEMPT_COUNT, \
2119 .addr_limit = KERNEL_DS, \
2120- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2121- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2122- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2123+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2124+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2125+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2126 }
2127
2128 #define init_thread_info (init_thread_union.thread_info)
2129@@ -155,7 +155,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2130 #define TIF_SYSCALL_AUDIT 9
2131 #define TIF_SYSCALL_TRACEPOINT 10
2132 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2133-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2134+/* within 8 bits of TIF_SYSCALL_TRACE
2135+ * to meet flexible second operand requirements
2136+ */
2137+#define TIF_GRSEC_SETXID 12
2138+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2139 #define TIF_USING_IWMMXT 17
2140 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2141 #define TIF_RESTORE_SIGMASK 20
2142@@ -169,10 +173,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2143 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2144 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2145 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2146+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2147
2148 /* Checks for any syscall work in entry-common.S */
2149 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2150- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2151+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2152
2153 /*
2154 * Change these and you break ASM code in entry-common.S
2155diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2156index 5f833f7..76e6644 100644
2157--- a/arch/arm/include/asm/tls.h
2158+++ b/arch/arm/include/asm/tls.h
2159@@ -3,6 +3,7 @@
2160
2161 #include <linux/compiler.h>
2162 #include <asm/thread_info.h>
2163+#include <asm/pgtable.h>
2164
2165 #ifdef __ASSEMBLY__
2166 #include <asm/asm-offsets.h>
2167@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2168 * at 0xffff0fe0 must be used instead. (see
2169 * entry-armv.S for details)
2170 */
2171+ pax_open_kernel();
2172 *((unsigned int *)0xffff0ff0) = val;
2173+ pax_close_kernel();
2174 #endif
2175 }
2176
2177diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2178index ce0786e..a80c264 100644
2179--- a/arch/arm/include/asm/uaccess.h
2180+++ b/arch/arm/include/asm/uaccess.h
2181@@ -18,6 +18,7 @@
2182 #include <asm/domain.h>
2183 #include <asm/unified.h>
2184 #include <asm/compiler.h>
2185+#include <asm/pgtable.h>
2186
2187 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2188 #include <asm-generic/uaccess-unaligned.h>
2189@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2190 static inline void set_fs(mm_segment_t fs)
2191 {
2192 current_thread_info()->addr_limit = fs;
2193- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2194+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2195 }
2196
2197 #define segment_eq(a, b) ((a) == (b))
2198
2199+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2200+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2201+
2202+static inline void pax_open_userland(void)
2203+{
2204+
2205+#ifdef CONFIG_PAX_MEMORY_UDEREF
2206+ if (segment_eq(get_fs(), USER_DS)) {
2207+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2208+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2209+ }
2210+#endif
2211+
2212+}
2213+
2214+static inline void pax_close_userland(void)
2215+{
2216+
2217+#ifdef CONFIG_PAX_MEMORY_UDEREF
2218+ if (segment_eq(get_fs(), USER_DS)) {
2219+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2220+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2221+ }
2222+#endif
2223+
2224+}
2225+
2226 #define __addr_ok(addr) ({ \
2227 unsigned long flag; \
2228 __asm__("cmp %2, %0; movlo %0, #0" \
2229@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2230
2231 #define get_user(x, p) \
2232 ({ \
2233+ int __e; \
2234 might_fault(); \
2235- __get_user_check(x, p); \
2236+ pax_open_userland(); \
2237+ __e = __get_user_check((x), (p)); \
2238+ pax_close_userland(); \
2239+ __e; \
2240 })
2241
2242 extern int __put_user_1(void *, unsigned int);
2243@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2244
2245 #define put_user(x, p) \
2246 ({ \
2247+ int __e; \
2248 might_fault(); \
2249- __put_user_check(x, p); \
2250+ pax_open_userland(); \
2251+ __e = __put_user_check((x), (p)); \
2252+ pax_close_userland(); \
2253+ __e; \
2254 })
2255
2256 #else /* CONFIG_MMU */
2257@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2258
2259 #endif /* CONFIG_MMU */
2260
2261+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2262 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2263
2264 #define user_addr_max() \
2265@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2266 #define __get_user(x, ptr) \
2267 ({ \
2268 long __gu_err = 0; \
2269+ pax_open_userland(); \
2270 __get_user_err((x), (ptr), __gu_err); \
2271+ pax_close_userland(); \
2272 __gu_err; \
2273 })
2274
2275 #define __get_user_error(x, ptr, err) \
2276 ({ \
2277+ pax_open_userland(); \
2278 __get_user_err((x), (ptr), err); \
2279+ pax_close_userland(); \
2280 (void) 0; \
2281 })
2282
2283@@ -368,13 +409,17 @@ do { \
2284 #define __put_user(x, ptr) \
2285 ({ \
2286 long __pu_err = 0; \
2287+ pax_open_userland(); \
2288 __put_user_err((x), (ptr), __pu_err); \
2289+ pax_close_userland(); \
2290 __pu_err; \
2291 })
2292
2293 #define __put_user_error(x, ptr, err) \
2294 ({ \
2295+ pax_open_userland(); \
2296 __put_user_err((x), (ptr), err); \
2297+ pax_close_userland(); \
2298 (void) 0; \
2299 })
2300
2301@@ -474,11 +519,44 @@ do { \
2302
2303
2304 #ifdef CONFIG_MMU
2305-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2306-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2307+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2308+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2309+
2310+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2311+{
2312+ unsigned long ret;
2313+
2314+ check_object_size(to, n, false);
2315+ pax_open_userland();
2316+ ret = ___copy_from_user(to, from, n);
2317+ pax_close_userland();
2318+ return ret;
2319+}
2320+
2321+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2322+{
2323+ unsigned long ret;
2324+
2325+ check_object_size(from, n, true);
2326+ pax_open_userland();
2327+ ret = ___copy_to_user(to, from, n);
2328+ pax_close_userland();
2329+ return ret;
2330+}
2331+
2332 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2333-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2334+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2335 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2336+
2337+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2338+{
2339+ unsigned long ret;
2340+ pax_open_userland();
2341+ ret = ___clear_user(addr, n);
2342+ pax_close_userland();
2343+ return ret;
2344+}
2345+
2346 #else
2347 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2348 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2349@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2350
2351 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2352 {
2353+ if ((long)n < 0)
2354+ return n;
2355+
2356 if (access_ok(VERIFY_READ, from, n))
2357 n = __copy_from_user(to, from, n);
2358 else /* security hole - plug it */
2359@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2360
2361 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2362 {
2363+ if ((long)n < 0)
2364+ return n;
2365+
2366 if (access_ok(VERIFY_WRITE, to, n))
2367 n = __copy_to_user(to, from, n);
2368 return n;
2369diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2370index 5af0ed1..cea83883 100644
2371--- a/arch/arm/include/uapi/asm/ptrace.h
2372+++ b/arch/arm/include/uapi/asm/ptrace.h
2373@@ -92,7 +92,7 @@
2374 * ARMv7 groups of PSR bits
2375 */
2376 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2377-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2378+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2379 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2380 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2381
2382diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2383index a88671c..1cc895e 100644
2384--- a/arch/arm/kernel/armksyms.c
2385+++ b/arch/arm/kernel/armksyms.c
2386@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2387
2388 /* networking */
2389 EXPORT_SYMBOL(csum_partial);
2390-EXPORT_SYMBOL(csum_partial_copy_from_user);
2391+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2392 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2393 EXPORT_SYMBOL(__csum_ipv6_magic);
2394
2395@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2396 #ifdef CONFIG_MMU
2397 EXPORT_SYMBOL(copy_page);
2398
2399-EXPORT_SYMBOL(__copy_from_user);
2400-EXPORT_SYMBOL(__copy_to_user);
2401-EXPORT_SYMBOL(__clear_user);
2402+EXPORT_SYMBOL(___copy_from_user);
2403+EXPORT_SYMBOL(___copy_to_user);
2404+EXPORT_SYMBOL(___clear_user);
2405
2406 EXPORT_SYMBOL(__get_user_1);
2407 EXPORT_SYMBOL(__get_user_2);
2408diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2409index 672b219..4aa120a 100644
2410--- a/arch/arm/kernel/entry-armv.S
2411+++ b/arch/arm/kernel/entry-armv.S
2412@@ -48,6 +48,87 @@
2413 9997:
2414 .endm
2415
2416+ .macro pax_enter_kernel
2417+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2418+ @ make aligned space for saved DACR
2419+ sub sp, sp, #8
2420+ @ save regs
2421+ stmdb sp!, {r1, r2}
2422+ @ read DACR from cpu_domain into r1
2423+ mov r2, sp
2424+ @ assume 8K pages, since we have to split the immediate in two
2425+ bic r2, r2, #(0x1fc0)
2426+ bic r2, r2, #(0x3f)
2427+ ldr r1, [r2, #TI_CPU_DOMAIN]
2428+ @ store old DACR on stack
2429+ str r1, [sp, #8]
2430+#ifdef CONFIG_PAX_KERNEXEC
2431+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2432+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2433+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2434+#endif
2435+#ifdef CONFIG_PAX_MEMORY_UDEREF
2436+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2437+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2438+#endif
2439+ @ write r1 to current_thread_info()->cpu_domain
2440+ str r1, [r2, #TI_CPU_DOMAIN]
2441+ @ write r1 to DACR
2442+ mcr p15, 0, r1, c3, c0, 0
2443+ @ instruction sync
2444+ instr_sync
2445+ @ restore regs
2446+ ldmia sp!, {r1, r2}
2447+#endif
2448+ .endm
2449+
2450+ .macro pax_open_userland
2451+#ifdef CONFIG_PAX_MEMORY_UDEREF
2452+ @ save regs
2453+ stmdb sp!, {r0, r1}
2454+ @ read DACR from cpu_domain into r1
2455+ mov r0, sp
2456+ @ assume 8K pages, since we have to split the immediate in two
2457+ bic r0, r0, #(0x1fc0)
2458+ bic r0, r0, #(0x3f)
2459+ ldr r1, [r0, #TI_CPU_DOMAIN]
2460+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2461+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2462+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2463+ @ write r1 to current_thread_info()->cpu_domain
2464+ str r1, [r0, #TI_CPU_DOMAIN]
2465+ @ write r1 to DACR
2466+ mcr p15, 0, r1, c3, c0, 0
2467+ @ instruction sync
2468+ instr_sync
2469+ @ restore regs
2470+ ldmia sp!, {r0, r1}
2471+#endif
2472+ .endm
2473+
2474+ .macro pax_close_userland
2475+#ifdef CONFIG_PAX_MEMORY_UDEREF
2476+ @ save regs
2477+ stmdb sp!, {r0, r1}
2478+ @ read DACR from cpu_domain into r1
2479+ mov r0, sp
2480+ @ assume 8K pages, since we have to split the immediate in two
2481+ bic r0, r0, #(0x1fc0)
2482+ bic r0, r0, #(0x3f)
2483+ ldr r1, [r0, #TI_CPU_DOMAIN]
2484+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2485+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2486+ @ write r1 to current_thread_info()->cpu_domain
2487+ str r1, [r0, #TI_CPU_DOMAIN]
2488+ @ write r1 to DACR
2489+ mcr p15, 0, r1, c3, c0, 0
2490+ @ instruction sync
2491+ instr_sync
2492+ @ restore regs
2493+ ldmia sp!, {r0, r1}
2494+#endif
2495+ .endm
2496+
2497 .macro pabt_helper
2498 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2499 #ifdef MULTI_PABORT
2500@@ -90,11 +171,15 @@
2501 * Invalid mode handlers
2502 */
2503 .macro inv_entry, reason
2504+
2505+ pax_enter_kernel
2506+
2507 sub sp, sp, #S_FRAME_SIZE
2508 ARM( stmib sp, {r1 - lr} )
2509 THUMB( stmia sp, {r0 - r12} )
2510 THUMB( str sp, [sp, #S_SP] )
2511 THUMB( str lr, [sp, #S_LR] )
2512+
2513 mov r1, #\reason
2514 .endm
2515
2516@@ -150,7 +235,11 @@ ENDPROC(__und_invalid)
2517 .macro svc_entry, stack_hole=0, trace=1
2518 UNWIND(.fnstart )
2519 UNWIND(.save {r0 - pc} )
2520+
2521+ pax_enter_kernel
2522+
2523 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2524+
2525 #ifdef CONFIG_THUMB2_KERNEL
2526 SPFIX( str r0, [sp] ) @ temporarily saved
2527 SPFIX( mov r0, sp )
2528@@ -165,7 +254,12 @@ ENDPROC(__und_invalid)
2529 ldmia r0, {r3 - r5}
2530 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2531 mov r6, #-1 @ "" "" "" ""
2532+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2533+ @ offset sp by 8 as done in pax_enter_kernel
2534+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2535+#else
2536 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2537+#endif
2538 SPFIX( addeq r2, r2, #4 )
2539 str r3, [sp, #-4]! @ save the "real" r0 copied
2540 @ from the exception stack
2541@@ -369,6 +463,9 @@ ENDPROC(__fiq_abt)
2542 .macro usr_entry, trace=1
2543 UNWIND(.fnstart )
2544 UNWIND(.cantunwind ) @ don't unwind the user space
2545+
2546+ pax_enter_kernel_user
2547+
2548 sub sp, sp, #S_FRAME_SIZE
2549 ARM( stmib sp, {r1 - r12} )
2550 THUMB( stmia sp, {r0 - r12} )
2551@@ -479,7 +576,9 @@ __und_usr:
2552 tst r3, #PSR_T_BIT @ Thumb mode?
2553 bne __und_usr_thumb
2554 sub r4, r2, #4 @ ARM instr at LR - 4
2555+ pax_open_userland
2556 1: ldrt r0, [r4]
2557+ pax_close_userland
2558 ARM_BE8(rev r0, r0) @ little endian instruction
2559
2560 @ r0 = 32-bit ARM instruction which caused the exception
2561@@ -513,11 +612,15 @@ __und_usr_thumb:
2562 */
2563 .arch armv6t2
2564 #endif
2565+ pax_open_userland
2566 2: ldrht r5, [r4]
2567+ pax_close_userland
2568 ARM_BE8(rev16 r5, r5) @ little endian instruction
2569 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2570 blo __und_usr_fault_16 @ 16bit undefined instruction
2571+ pax_open_userland
2572 3: ldrht r0, [r2]
2573+ pax_close_userland
2574 ARM_BE8(rev16 r0, r0) @ little endian instruction
2575 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2576 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2577@@ -547,7 +650,8 @@ ENDPROC(__und_usr)
2578 */
2579 .pushsection .fixup, "ax"
2580 .align 2
2581-4: str r4, [sp, #S_PC] @ retry current instruction
2582+4: pax_close_userland
2583+ str r4, [sp, #S_PC] @ retry current instruction
2584 ret r9
2585 .popsection
2586 .pushsection __ex_table,"a"
2587@@ -767,7 +871,7 @@ ENTRY(__switch_to)
2588 THUMB( str lr, [ip], #4 )
2589 ldr r4, [r2, #TI_TP_VALUE]
2590 ldr r5, [r2, #TI_TP_VALUE + 4]
2591-#ifdef CONFIG_CPU_USE_DOMAINS
2592+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2593 ldr r6, [r2, #TI_CPU_DOMAIN]
2594 #endif
2595 switch_tls r1, r4, r5, r3, r7
2596@@ -776,7 +880,7 @@ ENTRY(__switch_to)
2597 ldr r8, =__stack_chk_guard
2598 ldr r7, [r7, #TSK_STACK_CANARY]
2599 #endif
2600-#ifdef CONFIG_CPU_USE_DOMAINS
2601+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2602 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2603 #endif
2604 mov r5, r0
2605diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2606index f8ccc21..83d192f 100644
2607--- a/arch/arm/kernel/entry-common.S
2608+++ b/arch/arm/kernel/entry-common.S
2609@@ -11,18 +11,46 @@
2610 #include <asm/assembler.h>
2611 #include <asm/unistd.h>
2612 #include <asm/ftrace.h>
2613+#include <asm/domain.h>
2614 #include <asm/unwind.h>
2615
2616+#include "entry-header.S"
2617+
2618 #ifdef CONFIG_NEED_RET_TO_USER
2619 #include <mach/entry-macro.S>
2620 #else
2621 .macro arch_ret_to_user, tmp1, tmp2
2622+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2623+ @ save regs
2624+ stmdb sp!, {r1, r2}
2625+ @ read DACR from cpu_domain into r1
2626+ mov r2, sp
2627+ @ assume 8K pages, since we have to split the immediate in two
2628+ bic r2, r2, #(0x1fc0)
2629+ bic r2, r2, #(0x3f)
2630+ ldr r1, [r2, #TI_CPU_DOMAIN]
2631+#ifdef CONFIG_PAX_KERNEXEC
2632+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2633+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2635+#endif
2636+#ifdef CONFIG_PAX_MEMORY_UDEREF
2637+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2638+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2639+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2640+#endif
2641+ @ write r1 to current_thread_info()->cpu_domain
2642+ str r1, [r2, #TI_CPU_DOMAIN]
2643+ @ write r1 to DACR
2644+ mcr p15, 0, r1, c3, c0, 0
2645+ @ instruction sync
2646+ instr_sync
2647+ @ restore regs
2648+ ldmia sp!, {r1, r2}
2649+#endif
2650 .endm
2651 #endif
2652
2653-#include "entry-header.S"
2654-
2655-
2656 .align 5
2657 /*
2658 * This is the fast syscall return path. We do as little as
2659@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2660 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2661 #endif
2662
2663+ /*
2664+ * do this here to avoid a performance hit of wrapping the code above
2665+ * that directly dereferences userland to parse the SWI instruction
2666+ */
2667+ pax_enter_kernel_user
2668+
2669 adr tbl, sys_call_table @ load syscall table pointer
2670
2671 #if defined(CONFIG_OABI_COMPAT)
2672diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2673index 1a0045a..9b4f34d 100644
2674--- a/arch/arm/kernel/entry-header.S
2675+++ b/arch/arm/kernel/entry-header.S
2676@@ -196,6 +196,60 @@
2677 msr cpsr_c, \rtemp @ switch back to the SVC mode
2678 .endm
2679
2680+ .macro pax_enter_kernel_user
2681+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2682+ @ save regs
2683+ stmdb sp!, {r0, r1}
2684+ @ read DACR from cpu_domain into r1
2685+ mov r0, sp
2686+ @ assume 8K pages, since we have to split the immediate in two
2687+ bic r0, r0, #(0x1fc0)
2688+ bic r0, r0, #(0x3f)
2689+ ldr r1, [r0, #TI_CPU_DOMAIN]
2690+#ifdef CONFIG_PAX_MEMORY_UDEREF
2691+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2692+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2693+#endif
2694+#ifdef CONFIG_PAX_KERNEXEC
2695+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2696+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2697+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2698+#endif
2699+ @ write r1 to current_thread_info()->cpu_domain
2700+ str r1, [r0, #TI_CPU_DOMAIN]
2701+ @ write r1 to DACR
2702+ mcr p15, 0, r1, c3, c0, 0
2703+ @ instruction sync
2704+ instr_sync
2705+ @ restore regs
2706+ ldmia sp!, {r0, r1}
2707+#endif
2708+ .endm
2709+
2710+ .macro pax_exit_kernel
2711+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2712+ @ save regs
2713+ stmdb sp!, {r0, r1}
2714+ @ read old DACR from stack into r1
2715+ ldr r1, [sp, #(8 + S_SP)]
2716+ sub r1, r1, #8
2717+ ldr r1, [r1]
2718+
2719+ @ write r1 to current_thread_info()->cpu_domain
2720+ mov r0, sp
2721+ @ assume 8K pages, since we have to split the immediate in two
2722+ bic r0, r0, #(0x1fc0)
2723+ bic r0, r0, #(0x3f)
2724+ str r1, [r0, #TI_CPU_DOMAIN]
2725+ @ write r1 to DACR
2726+ mcr p15, 0, r1, c3, c0, 0
2727+ @ instruction sync
2728+ instr_sync
2729+ @ restore regs
2730+ ldmia sp!, {r0, r1}
2731+#endif
2732+ .endm
2733+
2734 #ifndef CONFIG_THUMB2_KERNEL
2735 .macro svc_exit, rpsr, irq = 0
2736 .if \irq != 0
2737@@ -215,6 +269,9 @@
2738 blne trace_hardirqs_off
2739 #endif
2740 .endif
2741+
2742+ pax_exit_kernel
2743+
2744 msr spsr_cxsf, \rpsr
2745 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2746 @ We must avoid clrex due to Cortex-A15 erratum #830321
2747@@ -291,6 +348,9 @@
2748 blne trace_hardirqs_off
2749 #endif
2750 .endif
2751+
2752+ pax_exit_kernel
2753+
2754 ldr lr, [sp, #S_SP] @ top of the stack
2755 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2756
2757diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2758index 059c3da..8e45cfc 100644
2759--- a/arch/arm/kernel/fiq.c
2760+++ b/arch/arm/kernel/fiq.c
2761@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2762 void *base = vectors_page;
2763 unsigned offset = FIQ_OFFSET;
2764
2765+ pax_open_kernel();
2766 memcpy(base + offset, start, length);
2767+ pax_close_kernel();
2768+
2769 if (!cache_is_vipt_nonaliasing())
2770 flush_icache_range((unsigned long)base + offset, offset +
2771 length);
2772diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2773index 0196327..50ac8895 100644
2774--- a/arch/arm/kernel/head.S
2775+++ b/arch/arm/kernel/head.S
2776@@ -444,7 +444,7 @@ __enable_mmu:
2777 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2778 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2779 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2780- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2781+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2782 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2783 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2784 #endif
2785diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2786index 2e11961..07f0704 100644
2787--- a/arch/arm/kernel/module.c
2788+++ b/arch/arm/kernel/module.c
2789@@ -38,12 +38,39 @@
2790 #endif
2791
2792 #ifdef CONFIG_MMU
2793-void *module_alloc(unsigned long size)
2794+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2795 {
2796+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2797+ return NULL;
2798 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2799- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2800+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2801 __builtin_return_address(0));
2802 }
2803+
2804+void *module_alloc(unsigned long size)
2805+{
2806+
2807+#ifdef CONFIG_PAX_KERNEXEC
2808+ return __module_alloc(size, PAGE_KERNEL);
2809+#else
2810+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2811+#endif
2812+
2813+}
2814+
2815+#ifdef CONFIG_PAX_KERNEXEC
2816+void module_memfree_exec(void *module_region)
2817+{
2818+ module_memfree(module_region);
2819+}
2820+EXPORT_SYMBOL(module_memfree_exec);
2821+
2822+void *module_alloc_exec(unsigned long size)
2823+{
2824+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2825+}
2826+EXPORT_SYMBOL(module_alloc_exec);
2827+#endif
2828 #endif
2829
2830 int
2831diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2832index 69bda1a..755113a 100644
2833--- a/arch/arm/kernel/patch.c
2834+++ b/arch/arm/kernel/patch.c
2835@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2836 else
2837 __acquire(&patch_lock);
2838
2839+ pax_open_kernel();
2840 if (thumb2 && __opcode_is_thumb16(insn)) {
2841 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2842 size = sizeof(u16);
2843@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2844 *(u32 *)waddr = insn;
2845 size = sizeof(u32);
2846 }
2847+ pax_close_kernel();
2848
2849 if (waddr != addr) {
2850 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2851diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2852index 2bf1a16..d959d40 100644
2853--- a/arch/arm/kernel/process.c
2854+++ b/arch/arm/kernel/process.c
2855@@ -213,6 +213,7 @@ void machine_power_off(void)
2856
2857 if (pm_power_off)
2858 pm_power_off();
2859+ BUG();
2860 }
2861
2862 /*
2863@@ -226,7 +227,7 @@ void machine_power_off(void)
2864 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2865 * to use. Implementing such co-ordination would be essentially impossible.
2866 */
2867-void machine_restart(char *cmd)
2868+__noreturn void machine_restart(char *cmd)
2869 {
2870 local_irq_disable();
2871 smp_send_stop();
2872@@ -252,8 +253,8 @@ void __show_regs(struct pt_regs *regs)
2873
2874 show_regs_print_info(KERN_DEFAULT);
2875
2876- print_symbol("PC is at %s\n", instruction_pointer(regs));
2877- print_symbol("LR is at %s\n", regs->ARM_lr);
2878+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2879+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2880 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2881 "sp : %08lx ip : %08lx fp : %08lx\n",
2882 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2883@@ -430,12 +431,6 @@ unsigned long get_wchan(struct task_struct *p)
2884 return 0;
2885 }
2886
2887-unsigned long arch_randomize_brk(struct mm_struct *mm)
2888-{
2889- unsigned long range_end = mm->brk + 0x02000000;
2890- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2891-}
2892-
2893 #ifdef CONFIG_MMU
2894 #ifdef CONFIG_KUSER_HELPERS
2895 /*
2896@@ -451,7 +446,7 @@ static struct vm_area_struct gate_vma = {
2897
2898 static int __init gate_vma_init(void)
2899 {
2900- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2901+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2902 return 0;
2903 }
2904 arch_initcall(gate_vma_init);
2905@@ -480,81 +475,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2906 return is_gate_vma(vma) ? "[vectors]" : NULL;
2907 }
2908
2909-/* If possible, provide a placement hint at a random offset from the
2910- * stack for the signal page.
2911- */
2912-static unsigned long sigpage_addr(const struct mm_struct *mm,
2913- unsigned int npages)
2914-{
2915- unsigned long offset;
2916- unsigned long first;
2917- unsigned long last;
2918- unsigned long addr;
2919- unsigned int slots;
2920-
2921- first = PAGE_ALIGN(mm->start_stack);
2922-
2923- last = TASK_SIZE - (npages << PAGE_SHIFT);
2924-
2925- /* No room after stack? */
2926- if (first > last)
2927- return 0;
2928-
2929- /* Just enough room? */
2930- if (first == last)
2931- return first;
2932-
2933- slots = ((last - first) >> PAGE_SHIFT) + 1;
2934-
2935- offset = get_random_int() % slots;
2936-
2937- addr = first + (offset << PAGE_SHIFT);
2938-
2939- return addr;
2940-}
2941-
2942-static struct page *signal_page;
2943-extern struct page *get_signal_page(void);
2944-
2945-static const struct vm_special_mapping sigpage_mapping = {
2946- .name = "[sigpage]",
2947- .pages = &signal_page,
2948-};
2949-
2950 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2951 {
2952 struct mm_struct *mm = current->mm;
2953- struct vm_area_struct *vma;
2954- unsigned long addr;
2955- unsigned long hint;
2956- int ret = 0;
2957-
2958- if (!signal_page)
2959- signal_page = get_signal_page();
2960- if (!signal_page)
2961- return -ENOMEM;
2962
2963 down_write(&mm->mmap_sem);
2964- hint = sigpage_addr(mm, 1);
2965- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2966- if (IS_ERR_VALUE(addr)) {
2967- ret = addr;
2968- goto up_fail;
2969- }
2970-
2971- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2972- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2973- &sigpage_mapping);
2974-
2975- if (IS_ERR(vma)) {
2976- ret = PTR_ERR(vma);
2977- goto up_fail;
2978- }
2979-
2980- mm->context.sigpage = addr;
2981-
2982- up_fail:
2983+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2984 up_write(&mm->mmap_sem);
2985- return ret;
2986+ return 0;
2987 }
2988 #endif
2989diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2990index f90fdf4..24e8c84 100644
2991--- a/arch/arm/kernel/psci.c
2992+++ b/arch/arm/kernel/psci.c
2993@@ -26,7 +26,7 @@
2994 #include <asm/psci.h>
2995 #include <asm/system_misc.h>
2996
2997-struct psci_operations psci_ops;
2998+struct psci_operations psci_ops __read_only;
2999
3000 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3001 typedef int (*psci_initcall_t)(const struct device_node *);
3002diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3003index ef9119f..31995a3 100644
3004--- a/arch/arm/kernel/ptrace.c
3005+++ b/arch/arm/kernel/ptrace.c
3006@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3007 regs->ARM_ip = ip;
3008 }
3009
3010+#ifdef CONFIG_GRKERNSEC_SETXID
3011+extern void gr_delayed_cred_worker(void);
3012+#endif
3013+
3014 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3015 {
3016 current_thread_info()->syscall = scno;
3017
3018+#ifdef CONFIG_GRKERNSEC_SETXID
3019+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3020+ gr_delayed_cred_worker();
3021+#endif
3022+
3023 /* Do the secure computing check first; failures should be fast. */
3024 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3025 if (secure_computing() == -1)
3026diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3027index 1d60beb..4aa25d5 100644
3028--- a/arch/arm/kernel/setup.c
3029+++ b/arch/arm/kernel/setup.c
3030@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3031 unsigned int elf_hwcap2 __read_mostly;
3032 EXPORT_SYMBOL(elf_hwcap2);
3033
3034+pteval_t __supported_pte_mask __read_only;
3035+pmdval_t __supported_pmd_mask __read_only;
3036
3037 #ifdef MULTI_CPU
3038-struct processor processor __read_mostly;
3039+struct processor processor __read_only;
3040 #endif
3041 #ifdef MULTI_TLB
3042-struct cpu_tlb_fns cpu_tlb __read_mostly;
3043+struct cpu_tlb_fns cpu_tlb __read_only;
3044 #endif
3045 #ifdef MULTI_USER
3046-struct cpu_user_fns cpu_user __read_mostly;
3047+struct cpu_user_fns cpu_user __read_only;
3048 #endif
3049 #ifdef MULTI_CACHE
3050-struct cpu_cache_fns cpu_cache __read_mostly;
3051+struct cpu_cache_fns cpu_cache __read_only;
3052 #endif
3053 #ifdef CONFIG_OUTER_CACHE
3054-struct outer_cache_fns outer_cache __read_mostly;
3055+struct outer_cache_fns outer_cache __read_only;
3056 EXPORT_SYMBOL(outer_cache);
3057 #endif
3058
3059@@ -250,9 +252,13 @@ static int __get_cpu_architecture(void)
3060 * Register 0 and check for VMSAv7 or PMSAv7 */
3061 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3062 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3063- (mmfr0 & 0x000000f0) >= 0x00000030)
3064+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3065 cpu_arch = CPU_ARCH_ARMv7;
3066- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3067+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3068+ __supported_pte_mask |= L_PTE_PXN;
3069+ __supported_pmd_mask |= PMD_PXNTABLE;
3070+ }
3071+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3072 (mmfr0 & 0x000000f0) == 0x00000020)
3073 cpu_arch = CPU_ARCH_ARMv6;
3074 else
3075diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3076index 023ac90..0a69950 100644
3077--- a/arch/arm/kernel/signal.c
3078+++ b/arch/arm/kernel/signal.c
3079@@ -24,8 +24,6 @@
3080
3081 extern const unsigned long sigreturn_codes[7];
3082
3083-static unsigned long signal_return_offset;
3084-
3085 #ifdef CONFIG_CRUNCH
3086 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3087 {
3088@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3089 * except when the MPU has protected the vectors
3090 * page from PL0
3091 */
3092- retcode = mm->context.sigpage + signal_return_offset +
3093- (idx << 2) + thumb;
3094+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3095 } else
3096 #endif
3097 {
3098@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3099 } while (thread_flags & _TIF_WORK_MASK);
3100 return 0;
3101 }
3102-
3103-struct page *get_signal_page(void)
3104-{
3105- unsigned long ptr;
3106- unsigned offset;
3107- struct page *page;
3108- void *addr;
3109-
3110- page = alloc_pages(GFP_KERNEL, 0);
3111-
3112- if (!page)
3113- return NULL;
3114-
3115- addr = page_address(page);
3116-
3117- /* Give the signal return code some randomness */
3118- offset = 0x200 + (get_random_int() & 0x7fc);
3119- signal_return_offset = offset;
3120-
3121- /*
3122- * Copy signal return handlers into the vector page, and
3123- * set sigreturn to be a pointer to these.
3124- */
3125- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3126-
3127- ptr = (unsigned long)addr + offset;
3128- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3129-
3130- return page;
3131-}
3132diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3133index 86ef244..c518451 100644
3134--- a/arch/arm/kernel/smp.c
3135+++ b/arch/arm/kernel/smp.c
3136@@ -76,7 +76,7 @@ enum ipi_msg_type {
3137
3138 static DECLARE_COMPLETION(cpu_running);
3139
3140-static struct smp_operations smp_ops;
3141+static struct smp_operations smp_ops __read_only;
3142
3143 void __init smp_set_ops(struct smp_operations *ops)
3144 {
3145diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3146index 7a3be1d..b00c7de 100644
3147--- a/arch/arm/kernel/tcm.c
3148+++ b/arch/arm/kernel/tcm.c
3149@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3150 .virtual = ITCM_OFFSET,
3151 .pfn = __phys_to_pfn(ITCM_OFFSET),
3152 .length = 0,
3153- .type = MT_MEMORY_RWX_ITCM,
3154+ .type = MT_MEMORY_RX_ITCM,
3155 }
3156 };
3157
3158@@ -267,7 +267,9 @@ no_dtcm:
3159 start = &__sitcm_text;
3160 end = &__eitcm_text;
3161 ram = &__itcm_start;
3162+ pax_open_kernel();
3163 memcpy(start, ram, itcm_code_sz);
3164+ pax_close_kernel();
3165 pr_debug("CPU ITCM: copied code from %p - %p\n",
3166 start, end);
3167 itcm_present = true;
3168diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3169index 788e23f..6fa06a1 100644
3170--- a/arch/arm/kernel/traps.c
3171+++ b/arch/arm/kernel/traps.c
3172@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3173 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3174 {
3175 #ifdef CONFIG_KALLSYMS
3176- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3177+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3178 #else
3179 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3180 #endif
3181@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3182 static int die_owner = -1;
3183 static unsigned int die_nest_count;
3184
3185+extern void gr_handle_kernel_exploit(void);
3186+
3187 static unsigned long oops_begin(void)
3188 {
3189 int cpu;
3190@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3191 panic("Fatal exception in interrupt");
3192 if (panic_on_oops)
3193 panic("Fatal exception");
3194+
3195+ gr_handle_kernel_exploit();
3196+
3197 if (signr)
3198 do_exit(signr);
3199 }
3200@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3201 kuser_init(vectors_base);
3202
3203 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3204- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3205+
3206+#ifndef CONFIG_PAX_MEMORY_UDEREF
3207+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3208+#endif
3209+
3210 #else /* ifndef CONFIG_CPU_V7M */
3211 /*
3212 * on V7-M there is no need to copy the vector table to a dedicated
3213diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3214index b31aa73..cc4b7a1 100644
3215--- a/arch/arm/kernel/vmlinux.lds.S
3216+++ b/arch/arm/kernel/vmlinux.lds.S
3217@@ -37,7 +37,7 @@
3218 #endif
3219
3220 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3221- defined(CONFIG_GENERIC_BUG)
3222+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3223 #define ARM_EXIT_KEEP(x) x
3224 #define ARM_EXIT_DISCARD(x)
3225 #else
3226@@ -123,6 +123,8 @@ SECTIONS
3227 #ifdef CONFIG_DEBUG_RODATA
3228 . = ALIGN(1<<SECTION_SHIFT);
3229 #endif
3230+ _etext = .; /* End of text section */
3231+
3232 RO_DATA(PAGE_SIZE)
3233
3234 . = ALIGN(4);
3235@@ -153,8 +155,6 @@ SECTIONS
3236
3237 NOTES
3238
3239- _etext = .; /* End of text and rodata section */
3240-
3241 #ifndef CONFIG_XIP_KERNEL
3242 # ifdef CONFIG_ARM_KERNMEM_PERMS
3243 . = ALIGN(1<<SECTION_SHIFT);
3244diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3245index b652af5..60231ab 100644
3246--- a/arch/arm/kvm/arm.c
3247+++ b/arch/arm/kvm/arm.c
3248@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3249 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3250
3251 /* The VMID used in the VTTBR */
3252-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3253+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3254 static u8 kvm_next_vmid;
3255 static DEFINE_SPINLOCK(kvm_vmid_lock);
3256
3257@@ -358,7 +358,7 @@ void force_vm_exit(const cpumask_t *mask)
3258 */
3259 static bool need_new_vmid_gen(struct kvm *kvm)
3260 {
3261- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3262+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3263 }
3264
3265 /**
3266@@ -391,7 +391,7 @@ static void update_vttbr(struct kvm *kvm)
3267
3268 /* First user of a new VMID generation? */
3269 if (unlikely(kvm_next_vmid == 0)) {
3270- atomic64_inc(&kvm_vmid_gen);
3271+ atomic64_inc_unchecked(&kvm_vmid_gen);
3272 kvm_next_vmid = 1;
3273
3274 /*
3275@@ -408,7 +408,7 @@ static void update_vttbr(struct kvm *kvm)
3276 kvm_call_hyp(__kvm_flush_vm_context);
3277 }
3278
3279- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3280+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3281 kvm->arch.vmid = kvm_next_vmid;
3282 kvm_next_vmid++;
3283
3284@@ -1087,7 +1087,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3285 /**
3286 * Initialize Hyp-mode and memory mappings on all CPUs.
3287 */
3288-int kvm_arch_init(void *opaque)
3289+int kvm_arch_init(const void *opaque)
3290 {
3291 int err;
3292 int ret, cpu;
3293diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3294index 14a0d98..7771a7d 100644
3295--- a/arch/arm/lib/clear_user.S
3296+++ b/arch/arm/lib/clear_user.S
3297@@ -12,14 +12,14 @@
3298
3299 .text
3300
3301-/* Prototype: int __clear_user(void *addr, size_t sz)
3302+/* Prototype: int ___clear_user(void *addr, size_t sz)
3303 * Purpose : clear some user memory
3304 * Params : addr - user memory address to clear
3305 * : sz - number of bytes to clear
3306 * Returns : number of bytes NOT cleared
3307 */
3308 ENTRY(__clear_user_std)
3309-WEAK(__clear_user)
3310+WEAK(___clear_user)
3311 stmfd sp!, {r1, lr}
3312 mov r2, #0
3313 cmp r1, #4
3314@@ -44,7 +44,7 @@ WEAK(__clear_user)
3315 USER( strnebt r2, [r0])
3316 mov r0, #0
3317 ldmfd sp!, {r1, pc}
3318-ENDPROC(__clear_user)
3319+ENDPROC(___clear_user)
3320 ENDPROC(__clear_user_std)
3321
3322 .pushsection .fixup,"ax"
3323diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3324index 7a235b9..73a0556 100644
3325--- a/arch/arm/lib/copy_from_user.S
3326+++ b/arch/arm/lib/copy_from_user.S
3327@@ -17,7 +17,7 @@
3328 /*
3329 * Prototype:
3330 *
3331- * size_t __copy_from_user(void *to, const void *from, size_t n)
3332+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3333 *
3334 * Purpose:
3335 *
3336@@ -89,11 +89,11 @@
3337
3338 .text
3339
3340-ENTRY(__copy_from_user)
3341+ENTRY(___copy_from_user)
3342
3343 #include "copy_template.S"
3344
3345-ENDPROC(__copy_from_user)
3346+ENDPROC(___copy_from_user)
3347
3348 .pushsection .fixup,"ax"
3349 .align 0
3350diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3351index 6ee2f67..d1cce76 100644
3352--- a/arch/arm/lib/copy_page.S
3353+++ b/arch/arm/lib/copy_page.S
3354@@ -10,6 +10,7 @@
3355 * ASM optimised string functions
3356 */
3357 #include <linux/linkage.h>
3358+#include <linux/const.h>
3359 #include <asm/assembler.h>
3360 #include <asm/asm-offsets.h>
3361 #include <asm/cache.h>
3362diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3363index a9d3db1..164b089 100644
3364--- a/arch/arm/lib/copy_to_user.S
3365+++ b/arch/arm/lib/copy_to_user.S
3366@@ -17,7 +17,7 @@
3367 /*
3368 * Prototype:
3369 *
3370- * size_t __copy_to_user(void *to, const void *from, size_t n)
3371+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3372 *
3373 * Purpose:
3374 *
3375@@ -93,11 +93,11 @@
3376 .text
3377
3378 ENTRY(__copy_to_user_std)
3379-WEAK(__copy_to_user)
3380+WEAK(___copy_to_user)
3381
3382 #include "copy_template.S"
3383
3384-ENDPROC(__copy_to_user)
3385+ENDPROC(___copy_to_user)
3386 ENDPROC(__copy_to_user_std)
3387
3388 .pushsection .fixup,"ax"
3389diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3390index 7d08b43..f7ca7ea 100644
3391--- a/arch/arm/lib/csumpartialcopyuser.S
3392+++ b/arch/arm/lib/csumpartialcopyuser.S
3393@@ -57,8 +57,8 @@
3394 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3395 */
3396
3397-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3398-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3399+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3400+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3401
3402 #include "csumpartialcopygeneric.S"
3403
3404diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3405index 312d43e..21d2322 100644
3406--- a/arch/arm/lib/delay.c
3407+++ b/arch/arm/lib/delay.c
3408@@ -29,7 +29,7 @@
3409 /*
3410 * Default to the loop-based delay implementation.
3411 */
3412-struct arm_delay_ops arm_delay_ops = {
3413+struct arm_delay_ops arm_delay_ops __read_only = {
3414 .delay = __loop_delay,
3415 .const_udelay = __loop_const_udelay,
3416 .udelay = __loop_udelay,
3417diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3418index 3e58d71..029817c 100644
3419--- a/arch/arm/lib/uaccess_with_memcpy.c
3420+++ b/arch/arm/lib/uaccess_with_memcpy.c
3421@@ -136,7 +136,7 @@ out:
3422 }
3423
3424 unsigned long
3425-__copy_to_user(void __user *to, const void *from, unsigned long n)
3426+___copy_to_user(void __user *to, const void *from, unsigned long n)
3427 {
3428 /*
3429 * This test is stubbed out of the main function above to keep
3430@@ -190,7 +190,7 @@ out:
3431 return n;
3432 }
3433
3434-unsigned long __clear_user(void __user *addr, unsigned long n)
3435+unsigned long ___clear_user(void __user *addr, unsigned long n)
3436 {
3437 /* See rational for this in __copy_to_user() above. */
3438 if (n < 64)
3439diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3440index 318d127..9aab0d1 100644
3441--- a/arch/arm/mach-exynos/suspend.c
3442+++ b/arch/arm/mach-exynos/suspend.c
3443@@ -18,6 +18,7 @@
3444 #include <linux/syscore_ops.h>
3445 #include <linux/cpu_pm.h>
3446 #include <linux/io.h>
3447+#include <linux/irq.h>
3448 #include <linux/irqchip/arm-gic.h>
3449 #include <linux/err.h>
3450 #include <linux/regulator/machine.h>
3451@@ -632,8 +633,10 @@ void __init exynos_pm_init(void)
3452 tmp |= pm_data->wake_disable_mask;
3453 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3454
3455- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3456- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3457+ pax_open_kernel();
3458+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3459+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3460+ pax_close_kernel();
3461
3462 register_syscore_ops(&exynos_pm_syscore_ops);
3463 suspend_set_ops(&exynos_suspend_ops);
3464diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3465index 0662087..004d163 100644
3466--- a/arch/arm/mach-keystone/keystone.c
3467+++ b/arch/arm/mach-keystone/keystone.c
3468@@ -27,7 +27,7 @@
3469
3470 #include "keystone.h"
3471
3472-static struct notifier_block platform_nb;
3473+static notifier_block_no_const platform_nb;
3474 static unsigned long keystone_dma_pfn_offset __read_mostly;
3475
3476 static int keystone_platform_notifier(struct notifier_block *nb,
3477diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3478index e46e9ea..9141c83 100644
3479--- a/arch/arm/mach-mvebu/coherency.c
3480+++ b/arch/arm/mach-mvebu/coherency.c
3481@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3482
3483 /*
3484 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3485- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3486+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3487 * is needed as a workaround for a deadlock issue between the PCIe
3488 * interface and the cache controller.
3489 */
3490@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3491 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3492
3493 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3494- mtype = MT_UNCACHED;
3495+ mtype = MT_UNCACHED_RW;
3496
3497 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3498 }
3499diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3500index b6443a4..20a0b74 100644
3501--- a/arch/arm/mach-omap2/board-n8x0.c
3502+++ b/arch/arm/mach-omap2/board-n8x0.c
3503@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3504 }
3505 #endif
3506
3507-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3508+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3509 .late_init = n8x0_menelaus_late_init,
3510 };
3511
3512diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3513index 79f49d9..70bf184 100644
3514--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3515+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3516@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3517 void (*resume)(void);
3518 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3519 void (*hotplug_restart)(void);
3520-};
3521+} __no_const;
3522
3523 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3524 static struct powerdomain *mpuss_pd;
3525@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3526 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3527 {}
3528
3529-struct cpu_pm_ops omap_pm_ops = {
3530+static struct cpu_pm_ops omap_pm_ops __read_only = {
3531 .finish_suspend = default_finish_suspend,
3532 .resume = dummy_cpu_resume,
3533 .scu_prepare = dummy_scu_prepare,
3534diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3535index 5305ec7..6d74045 100644
3536--- a/arch/arm/mach-omap2/omap-smp.c
3537+++ b/arch/arm/mach-omap2/omap-smp.c
3538@@ -19,6 +19,7 @@
3539 #include <linux/device.h>
3540 #include <linux/smp.h>
3541 #include <linux/io.h>
3542+#include <linux/irq.h>
3543 #include <linux/irqchip/arm-gic.h>
3544
3545 #include <asm/smp_scu.h>
3546diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3547index f961c46..4a453dc 100644
3548--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3549+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3550@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3551 return NOTIFY_OK;
3552 }
3553
3554-static struct notifier_block __refdata irq_hotplug_notifier = {
3555+static struct notifier_block irq_hotplug_notifier = {
3556 .notifier_call = irq_cpu_hotplug_notify,
3557 };
3558
3559diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3560index be9541e..821805f 100644
3561--- a/arch/arm/mach-omap2/omap_device.c
3562+++ b/arch/arm/mach-omap2/omap_device.c
3563@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3564 struct platform_device __init *omap_device_build(const char *pdev_name,
3565 int pdev_id,
3566 struct omap_hwmod *oh,
3567- void *pdata, int pdata_len)
3568+ const void *pdata, int pdata_len)
3569 {
3570 struct omap_hwmod *ohs[] = { oh };
3571
3572@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3573 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3574 int pdev_id,
3575 struct omap_hwmod **ohs,
3576- int oh_cnt, void *pdata,
3577+ int oh_cnt, const void *pdata,
3578 int pdata_len)
3579 {
3580 int ret = -ENOMEM;
3581diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3582index 78c02b3..c94109a 100644
3583--- a/arch/arm/mach-omap2/omap_device.h
3584+++ b/arch/arm/mach-omap2/omap_device.h
3585@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3586 /* Core code interface */
3587
3588 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3589- struct omap_hwmod *oh, void *pdata,
3590+ struct omap_hwmod *oh, const void *pdata,
3591 int pdata_len);
3592
3593 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3594 struct omap_hwmod **oh, int oh_cnt,
3595- void *pdata, int pdata_len);
3596+ const void *pdata, int pdata_len);
3597
3598 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3599 struct omap_hwmod **ohs, int oh_cnt);
3600diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3601index 355b089..2c9d7c3 100644
3602--- a/arch/arm/mach-omap2/omap_hwmod.c
3603+++ b/arch/arm/mach-omap2/omap_hwmod.c
3604@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3605 int (*init_clkdm)(struct omap_hwmod *oh);
3606 void (*update_context_lost)(struct omap_hwmod *oh);
3607 int (*get_context_lost)(struct omap_hwmod *oh);
3608-};
3609+} __no_const;
3610
3611 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3612-static struct omap_hwmod_soc_ops soc_ops;
3613+static struct omap_hwmod_soc_ops soc_ops __read_only;
3614
3615 /* omap_hwmod_list contains all registered struct omap_hwmods */
3616 static LIST_HEAD(omap_hwmod_list);
3617diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3618index 95fee54..cfa9cf1 100644
3619--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3620+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3621@@ -10,6 +10,7 @@
3622
3623 #include <linux/kernel.h>
3624 #include <linux/init.h>
3625+#include <asm/pgtable.h>
3626
3627 #include "powerdomain.h"
3628
3629@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3630
3631 void __init am43xx_powerdomains_init(void)
3632 {
3633- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3634+ pax_open_kernel();
3635+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3636+ pax_close_kernel();
3637 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3638 pwrdm_register_pwrdms(powerdomains_am43xx);
3639 pwrdm_complete_init();
3640diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3641index ff0a68c..b312aa0 100644
3642--- a/arch/arm/mach-omap2/wd_timer.c
3643+++ b/arch/arm/mach-omap2/wd_timer.c
3644@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3645 struct omap_hwmod *oh;
3646 char *oh_name = "wd_timer2";
3647 char *dev_name = "omap_wdt";
3648- struct omap_wd_timer_platform_data pdata;
3649+ static struct omap_wd_timer_platform_data pdata = {
3650+ .read_reset_sources = prm_read_reset_sources
3651+ };
3652
3653 if (!cpu_class_is_omap2() || of_have_populated_dt())
3654 return 0;
3655@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3656 return -EINVAL;
3657 }
3658
3659- pdata.read_reset_sources = prm_read_reset_sources;
3660-
3661 pdev = omap_device_build(dev_name, id, oh, &pdata,
3662 sizeof(struct omap_wd_timer_platform_data));
3663 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3664diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3665index 4f25a7c..a81be85 100644
3666--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3667+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3668@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3669 bool entered_lp2 = false;
3670
3671 if (tegra_pending_sgi())
3672- ACCESS_ONCE(abort_flag) = true;
3673+ ACCESS_ONCE_RW(abort_flag) = true;
3674
3675 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3676
3677diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3678index ab95f53..4b977a7 100644
3679--- a/arch/arm/mach-tegra/irq.c
3680+++ b/arch/arm/mach-tegra/irq.c
3681@@ -20,6 +20,7 @@
3682 #include <linux/cpu_pm.h>
3683 #include <linux/interrupt.h>
3684 #include <linux/io.h>
3685+#include <linux/irq.h>
3686 #include <linux/irqchip/arm-gic.h>
3687 #include <linux/irq.h>
3688 #include <linux/kernel.h>
3689diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3690index 2cb587b..6ddfebf 100644
3691--- a/arch/arm/mach-ux500/pm.c
3692+++ b/arch/arm/mach-ux500/pm.c
3693@@ -10,6 +10,7 @@
3694 */
3695
3696 #include <linux/kernel.h>
3697+#include <linux/irq.h>
3698 #include <linux/irqchip/arm-gic.h>
3699 #include <linux/delay.h>
3700 #include <linux/io.h>
3701diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3702index 2dea8b5..6499da2 100644
3703--- a/arch/arm/mach-ux500/setup.h
3704+++ b/arch/arm/mach-ux500/setup.h
3705@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3706 .type = MT_DEVICE, \
3707 }
3708
3709-#define __MEM_DEV_DESC(x, sz) { \
3710- .virtual = IO_ADDRESS(x), \
3711- .pfn = __phys_to_pfn(x), \
3712- .length = sz, \
3713- .type = MT_MEMORY_RWX, \
3714-}
3715-
3716 extern struct smp_operations ux500_smp_ops;
3717 extern void ux500_cpu_die(unsigned int cpu);
3718
3719diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3720index 52d768f..5f93180 100644
3721--- a/arch/arm/mach-zynq/platsmp.c
3722+++ b/arch/arm/mach-zynq/platsmp.c
3723@@ -24,6 +24,7 @@
3724 #include <linux/io.h>
3725 #include <asm/cacheflush.h>
3726 #include <asm/smp_scu.h>
3727+#include <linux/irq.h>
3728 #include <linux/irqchip/arm-gic.h>
3729 #include "common.h"
3730
3731diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3732index 9b4f29e..bbf3bfa 100644
3733--- a/arch/arm/mm/Kconfig
3734+++ b/arch/arm/mm/Kconfig
3735@@ -446,6 +446,7 @@ config CPU_32v5
3736
3737 config CPU_32v6
3738 bool
3739+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3740 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3741
3742 config CPU_32v6K
3743@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3744
3745 config CPU_USE_DOMAINS
3746 bool
3747+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3748 help
3749 This option enables or disables the use of domain switching
3750 via the set_fs() function.
3751@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3752
3753 config KUSER_HELPERS
3754 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3755- depends on MMU
3756+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3757 default y
3758 help
3759 Warning: disabling this option may break user programs.
3760@@ -812,7 +814,7 @@ config KUSER_HELPERS
3761 See Documentation/arm/kernel_user_helpers.txt for details.
3762
3763 However, the fixed address nature of these helpers can be used
3764- by ROP (return orientated programming) authors when creating
3765+ by ROP (Return Oriented Programming) authors when creating
3766 exploits.
3767
3768 If all of the binaries and libraries which run on your platform
3769diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3770index 2c0c541..4585df9 100644
3771--- a/arch/arm/mm/alignment.c
3772+++ b/arch/arm/mm/alignment.c
3773@@ -216,10 +216,12 @@ union offset_union {
3774 #define __get16_unaligned_check(ins,val,addr) \
3775 do { \
3776 unsigned int err = 0, v, a = addr; \
3777+ pax_open_userland(); \
3778 __get8_unaligned_check(ins,v,a,err); \
3779 val = v << ((BE) ? 8 : 0); \
3780 __get8_unaligned_check(ins,v,a,err); \
3781 val |= v << ((BE) ? 0 : 8); \
3782+ pax_close_userland(); \
3783 if (err) \
3784 goto fault; \
3785 } while (0)
3786@@ -233,6 +235,7 @@ union offset_union {
3787 #define __get32_unaligned_check(ins,val,addr) \
3788 do { \
3789 unsigned int err = 0, v, a = addr; \
3790+ pax_open_userland(); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792 val = v << ((BE) ? 24 : 0); \
3793 __get8_unaligned_check(ins,v,a,err); \
3794@@ -241,6 +244,7 @@ union offset_union {
3795 val |= v << ((BE) ? 8 : 16); \
3796 __get8_unaligned_check(ins,v,a,err); \
3797 val |= v << ((BE) ? 0 : 24); \
3798+ pax_close_userland(); \
3799 if (err) \
3800 goto fault; \
3801 } while (0)
3802@@ -254,6 +258,7 @@ union offset_union {
3803 #define __put16_unaligned_check(ins,val,addr) \
3804 do { \
3805 unsigned int err = 0, v = val, a = addr; \
3806+ pax_open_userland(); \
3807 __asm__( FIRST_BYTE_16 \
3808 ARM( "1: "ins" %1, [%2], #1\n" ) \
3809 THUMB( "1: "ins" %1, [%2]\n" ) \
3810@@ -273,6 +278,7 @@ union offset_union {
3811 " .popsection\n" \
3812 : "=r" (err), "=&r" (v), "=&r" (a) \
3813 : "0" (err), "1" (v), "2" (a)); \
3814+ pax_close_userland(); \
3815 if (err) \
3816 goto fault; \
3817 } while (0)
3818@@ -286,6 +292,7 @@ union offset_union {
3819 #define __put32_unaligned_check(ins,val,addr) \
3820 do { \
3821 unsigned int err = 0, v = val, a = addr; \
3822+ pax_open_userland(); \
3823 __asm__( FIRST_BYTE_32 \
3824 ARM( "1: "ins" %1, [%2], #1\n" ) \
3825 THUMB( "1: "ins" %1, [%2]\n" ) \
3826@@ -315,6 +322,7 @@ union offset_union {
3827 " .popsection\n" \
3828 : "=r" (err), "=&r" (v), "=&r" (a) \
3829 : "0" (err), "1" (v), "2" (a)); \
3830+ pax_close_userland(); \
3831 if (err) \
3832 goto fault; \
3833 } while (0)
3834diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3835index 8f15f70..d599a2b 100644
3836--- a/arch/arm/mm/cache-l2x0.c
3837+++ b/arch/arm/mm/cache-l2x0.c
3838@@ -43,7 +43,7 @@ struct l2c_init_data {
3839 void (*save)(void __iomem *);
3840 void (*configure)(void __iomem *);
3841 struct outer_cache_fns outer_cache;
3842-};
3843+} __do_const;
3844
3845 #define CACHE_LINE_SIZE 32
3846
3847diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3848index 845769e..4278fd7 100644
3849--- a/arch/arm/mm/context.c
3850+++ b/arch/arm/mm/context.c
3851@@ -43,7 +43,7 @@
3852 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3853
3854 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3855-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3856+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3857 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3858
3859 static DEFINE_PER_CPU(atomic64_t, active_asids);
3860@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3861 {
3862 static u32 cur_idx = 1;
3863 u64 asid = atomic64_read(&mm->context.id);
3864- u64 generation = atomic64_read(&asid_generation);
3865+ u64 generation = atomic64_read_unchecked(&asid_generation);
3866
3867 if (asid != 0) {
3868 /*
3869@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3870 */
3871 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3872 if (asid == NUM_USER_ASIDS) {
3873- generation = atomic64_add_return(ASID_FIRST_VERSION,
3874+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3875 &asid_generation);
3876 flush_context(cpu);
3877 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3878@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3879 cpu_set_reserved_ttbr0();
3880
3881 asid = atomic64_read(&mm->context.id);
3882- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3883+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3884 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3885 goto switch_mm_fastpath;
3886
3887 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3888 /* Check that our ASID belongs to the current generation. */
3889 asid = atomic64_read(&mm->context.id);
3890- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3891+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3892 asid = new_context(mm, cpu);
3893 atomic64_set(&mm->context.id, asid);
3894 }
3895diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3896index 6333d9c..fd09b46 100644
3897--- a/arch/arm/mm/fault.c
3898+++ b/arch/arm/mm/fault.c
3899@@ -25,6 +25,7 @@
3900 #include <asm/system_misc.h>
3901 #include <asm/system_info.h>
3902 #include <asm/tlbflush.h>
3903+#include <asm/sections.h>
3904
3905 #include "fault.h"
3906
3907@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3908 if (fixup_exception(regs))
3909 return;
3910
3911+#ifdef CONFIG_PAX_MEMORY_UDEREF
3912+ if (addr < TASK_SIZE) {
3913+ if (current->signal->curr_ip)
3914+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3915+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3916+ else
3917+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3918+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3919+ }
3920+#endif
3921+
3922+#ifdef CONFIG_PAX_KERNEXEC
3923+ if ((fsr & FSR_WRITE) &&
3924+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3925+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3926+ {
3927+ if (current->signal->curr_ip)
3928+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3929+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3930+ else
3931+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3932+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3933+ }
3934+#endif
3935+
3936 /*
3937 * No handler, we'll have to terminate things with extreme prejudice.
3938 */
3939@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3940 }
3941 #endif
3942
3943+#ifdef CONFIG_PAX_PAGEEXEC
3944+ if (fsr & FSR_LNX_PF) {
3945+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3946+ do_group_exit(SIGKILL);
3947+ }
3948+#endif
3949+
3950 tsk->thread.address = addr;
3951 tsk->thread.error_code = fsr;
3952 tsk->thread.trap_no = 14;
3953@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3954 }
3955 #endif /* CONFIG_MMU */
3956
3957+#ifdef CONFIG_PAX_PAGEEXEC
3958+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3959+{
3960+ long i;
3961+
3962+ printk(KERN_ERR "PAX: bytes at PC: ");
3963+ for (i = 0; i < 20; i++) {
3964+ unsigned char c;
3965+ if (get_user(c, (__force unsigned char __user *)pc+i))
3966+ printk(KERN_CONT "?? ");
3967+ else
3968+ printk(KERN_CONT "%02x ", c);
3969+ }
3970+ printk("\n");
3971+
3972+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3973+ for (i = -1; i < 20; i++) {
3974+ unsigned long c;
3975+ if (get_user(c, (__force unsigned long __user *)sp+i))
3976+ printk(KERN_CONT "???????? ");
3977+ else
3978+ printk(KERN_CONT "%08lx ", c);
3979+ }
3980+ printk("\n");
3981+}
3982+#endif
3983+
3984 /*
3985 * First Level Translation Fault Handler
3986 *
3987@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3988 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3989 struct siginfo info;
3990
3991+#ifdef CONFIG_PAX_MEMORY_UDEREF
3992+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3993+ if (current->signal->curr_ip)
3994+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3995+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3996+ else
3997+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3998+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3999+ goto die;
4000+ }
4001+#endif
4002+
4003 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4004 return;
4005
4006+die:
4007 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4008 inf->name, fsr, addr);
4009 show_pte(current->mm, addr);
4010@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4011 ifsr_info[nr].name = name;
4012 }
4013
4014+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4015+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4016+
4017 asmlinkage void __exception
4018 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4019 {
4020 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4021 struct siginfo info;
4022+ unsigned long pc = instruction_pointer(regs);
4023+
4024+ if (user_mode(regs)) {
4025+ unsigned long sigpage = current->mm->context.sigpage;
4026+
4027+ if (sigpage <= pc && pc < sigpage + 7*4) {
4028+ if (pc < sigpage + 3*4)
4029+ sys_sigreturn(regs);
4030+ else
4031+ sys_rt_sigreturn(regs);
4032+ return;
4033+ }
4034+ if (pc == 0xffff0f60UL) {
4035+ /*
4036+ * PaX: __kuser_cmpxchg64 emulation
4037+ */
4038+ // TODO
4039+ //regs->ARM_pc = regs->ARM_lr;
4040+ //return;
4041+ }
4042+ if (pc == 0xffff0fa0UL) {
4043+ /*
4044+ * PaX: __kuser_memory_barrier emulation
4045+ */
4046+ // dmb(); implied by the exception
4047+ regs->ARM_pc = regs->ARM_lr;
4048+ return;
4049+ }
4050+ if (pc == 0xffff0fc0UL) {
4051+ /*
4052+ * PaX: __kuser_cmpxchg emulation
4053+ */
4054+ // TODO
4055+ //long new;
4056+ //int op;
4057+
4058+ //op = FUTEX_OP_SET << 28;
4059+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4060+ //regs->ARM_r0 = old != new;
4061+ //regs->ARM_pc = regs->ARM_lr;
4062+ //return;
4063+ }
4064+ if (pc == 0xffff0fe0UL) {
4065+ /*
4066+ * PaX: __kuser_get_tls emulation
4067+ */
4068+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4069+ regs->ARM_pc = regs->ARM_lr;
4070+ return;
4071+ }
4072+ }
4073+
4074+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4075+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4076+ if (current->signal->curr_ip)
4077+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4078+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4079+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4080+ else
4081+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4082+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4083+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4084+ goto die;
4085+ }
4086+#endif
4087+
4088+#ifdef CONFIG_PAX_REFCOUNT
4089+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4090+#ifdef CONFIG_THUMB2_KERNEL
4091+ unsigned short bkpt;
4092+
4093+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4094+#else
4095+ unsigned int bkpt;
4096+
4097+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4098+#endif
4099+ current->thread.error_code = ifsr;
4100+ current->thread.trap_no = 0;
4101+ pax_report_refcount_overflow(regs);
4102+ fixup_exception(regs);
4103+ return;
4104+ }
4105+ }
4106+#endif
4107
4108 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4109 return;
4110
4111+die:
4112 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4113 inf->name, ifsr, addr);
4114
4115diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4116index cf08bdf..772656c 100644
4117--- a/arch/arm/mm/fault.h
4118+++ b/arch/arm/mm/fault.h
4119@@ -3,6 +3,7 @@
4120
4121 /*
4122 * Fault status register encodings. We steal bit 31 for our own purposes.
4123+ * Set when the FSR value is from an instruction fault.
4124 */
4125 #define FSR_LNX_PF (1 << 31)
4126 #define FSR_WRITE (1 << 11)
4127@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4128 }
4129 #endif
4130
4131+/* valid for LPAE and !LPAE */
4132+static inline int is_xn_fault(unsigned int fsr)
4133+{
4134+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4135+}
4136+
4137+static inline int is_domain_fault(unsigned int fsr)
4138+{
4139+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4140+}
4141+
4142 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4143 unsigned long search_exception_table(unsigned long addr);
4144
4145diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4146index 1609b02..def0785 100644
4147--- a/arch/arm/mm/init.c
4148+++ b/arch/arm/mm/init.c
4149@@ -755,7 +755,46 @@ void free_tcmmem(void)
4150 {
4151 #ifdef CONFIG_HAVE_TCM
4152 extern char __tcm_start, __tcm_end;
4153+#endif
4154
4155+#ifdef CONFIG_PAX_KERNEXEC
4156+ unsigned long addr;
4157+ pgd_t *pgd;
4158+ pud_t *pud;
4159+ pmd_t *pmd;
4160+ int cpu_arch = cpu_architecture();
4161+ unsigned int cr = get_cr();
4162+
4163+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4164+ /* make pages tables, etc before .text NX */
4165+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4166+ pgd = pgd_offset_k(addr);
4167+ pud = pud_offset(pgd, addr);
4168+ pmd = pmd_offset(pud, addr);
4169+ __section_update(pmd, addr, PMD_SECT_XN);
4170+ }
4171+ /* make init NX */
4172+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4173+ pgd = pgd_offset_k(addr);
4174+ pud = pud_offset(pgd, addr);
4175+ pmd = pmd_offset(pud, addr);
4176+ __section_update(pmd, addr, PMD_SECT_XN);
4177+ }
4178+ /* make kernel code/rodata RX */
4179+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4180+ pgd = pgd_offset_k(addr);
4181+ pud = pud_offset(pgd, addr);
4182+ pmd = pmd_offset(pud, addr);
4183+#ifdef CONFIG_ARM_LPAE
4184+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4185+#else
4186+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4187+#endif
4188+ }
4189+ }
4190+#endif
4191+
4192+#ifdef CONFIG_HAVE_TCM
4193 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4194 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4195 #endif
4196diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4197index d1e5ad7..84dcbf2 100644
4198--- a/arch/arm/mm/ioremap.c
4199+++ b/arch/arm/mm/ioremap.c
4200@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4201 unsigned int mtype;
4202
4203 if (cached)
4204- mtype = MT_MEMORY_RWX;
4205+ mtype = MT_MEMORY_RX;
4206 else
4207- mtype = MT_MEMORY_RWX_NONCACHED;
4208+ mtype = MT_MEMORY_RX_NONCACHED;
4209
4210 return __arm_ioremap_caller(phys_addr, size, mtype,
4211 __builtin_return_address(0));
4212diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4213index 5e85ed3..b10a7ed 100644
4214--- a/arch/arm/mm/mmap.c
4215+++ b/arch/arm/mm/mmap.c
4216@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4217 struct vm_area_struct *vma;
4218 int do_align = 0;
4219 int aliasing = cache_is_vipt_aliasing();
4220+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4221 struct vm_unmapped_area_info info;
4222
4223 /*
4224@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4225 if (len > TASK_SIZE)
4226 return -ENOMEM;
4227
4228+#ifdef CONFIG_PAX_RANDMMAP
4229+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4230+#endif
4231+
4232 if (addr) {
4233 if (do_align)
4234 addr = COLOUR_ALIGN(addr, pgoff);
4235@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4236 addr = PAGE_ALIGN(addr);
4237
4238 vma = find_vma(mm, addr);
4239- if (TASK_SIZE - len >= addr &&
4240- (!vma || addr + len <= vma->vm_start))
4241+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4242 return addr;
4243 }
4244
4245@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4246 info.high_limit = TASK_SIZE;
4247 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4248 info.align_offset = pgoff << PAGE_SHIFT;
4249+ info.threadstack_offset = offset;
4250 return vm_unmapped_area(&info);
4251 }
4252
4253@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4254 unsigned long addr = addr0;
4255 int do_align = 0;
4256 int aliasing = cache_is_vipt_aliasing();
4257+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4258 struct vm_unmapped_area_info info;
4259
4260 /*
4261@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4262 return addr;
4263 }
4264
4265+#ifdef CONFIG_PAX_RANDMMAP
4266+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4267+#endif
4268+
4269 /* requesting a specific address */
4270 if (addr) {
4271 if (do_align)
4272@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4273 else
4274 addr = PAGE_ALIGN(addr);
4275 vma = find_vma(mm, addr);
4276- if (TASK_SIZE - len >= addr &&
4277- (!vma || addr + len <= vma->vm_start))
4278+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4279 return addr;
4280 }
4281
4282@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4283 info.high_limit = mm->mmap_base;
4284 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4285 info.align_offset = pgoff << PAGE_SHIFT;
4286+ info.threadstack_offset = offset;
4287 addr = vm_unmapped_area(&info);
4288
4289 /*
4290@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4291 {
4292 unsigned long random_factor = 0UL;
4293
4294+#ifdef CONFIG_PAX_RANDMMAP
4295+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4296+#endif
4297+
4298 /* 8 bits of randomness in 20 address space bits */
4299 if ((current->flags & PF_RANDOMIZE) &&
4300 !(current->personality & ADDR_NO_RANDOMIZE))
4301@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4302
4303 if (mmap_is_legacy()) {
4304 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4305+
4306+#ifdef CONFIG_PAX_RANDMMAP
4307+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4308+ mm->mmap_base += mm->delta_mmap;
4309+#endif
4310+
4311 mm->get_unmapped_area = arch_get_unmapped_area;
4312 } else {
4313 mm->mmap_base = mmap_base(random_factor);
4314+
4315+#ifdef CONFIG_PAX_RANDMMAP
4316+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4317+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4318+#endif
4319+
4320 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4321 }
4322 }
4323diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4324index 4e6ef89..21c27f2 100644
4325--- a/arch/arm/mm/mmu.c
4326+++ b/arch/arm/mm/mmu.c
4327@@ -41,6 +41,22 @@
4328 #include "mm.h"
4329 #include "tcm.h"
4330
4331+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4332+void modify_domain(unsigned int dom, unsigned int type)
4333+{
4334+ struct thread_info *thread = current_thread_info();
4335+ unsigned int domain = thread->cpu_domain;
4336+ /*
4337+ * DOMAIN_MANAGER might be defined to some other value,
4338+ * use the arch-defined constant
4339+ */
4340+ domain &= ~domain_val(dom, 3);
4341+ thread->cpu_domain = domain | domain_val(dom, type);
4342+ set_domain(thread->cpu_domain);
4343+}
4344+EXPORT_SYMBOL(modify_domain);
4345+#endif
4346+
4347 /*
4348 * empty_zero_page is a special page that is used for
4349 * zero-initialized data and COW.
4350@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4351 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4352 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4353
4354-static struct mem_type mem_types[] = {
4355+#ifdef CONFIG_PAX_KERNEXEC
4356+#define L_PTE_KERNEXEC L_PTE_RDONLY
4357+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4358+#else
4359+#define L_PTE_KERNEXEC L_PTE_DIRTY
4360+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4361+#endif
4362+
4363+static struct mem_type mem_types[] __read_only = {
4364 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4365 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4366 L_PTE_SHARED,
4367@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4368 .prot_sect = PROT_SECT_DEVICE,
4369 .domain = DOMAIN_IO,
4370 },
4371- [MT_UNCACHED] = {
4372+ [MT_UNCACHED_RW] = {
4373 .prot_pte = PROT_PTE_DEVICE,
4374 .prot_l1 = PMD_TYPE_TABLE,
4375 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4376 .domain = DOMAIN_IO,
4377 },
4378- [MT_CACHECLEAN] = {
4379- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4380+ [MT_CACHECLEAN_RO] = {
4381+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4382 .domain = DOMAIN_KERNEL,
4383 },
4384 #ifndef CONFIG_ARM_LPAE
4385- [MT_MINICLEAN] = {
4386- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4387+ [MT_MINICLEAN_RO] = {
4388+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4389 .domain = DOMAIN_KERNEL,
4390 },
4391 #endif
4392@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4393 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4394 L_PTE_RDONLY,
4395 .prot_l1 = PMD_TYPE_TABLE,
4396- .domain = DOMAIN_USER,
4397+ .domain = DOMAIN_VECTORS,
4398 },
4399 [MT_HIGH_VECTORS] = {
4400 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4401 L_PTE_USER | L_PTE_RDONLY,
4402 .prot_l1 = PMD_TYPE_TABLE,
4403- .domain = DOMAIN_USER,
4404+ .domain = DOMAIN_VECTORS,
4405 },
4406- [MT_MEMORY_RWX] = {
4407+ [__MT_MEMORY_RWX] = {
4408 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4409 .prot_l1 = PMD_TYPE_TABLE,
4410 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4411@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4412 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4413 .domain = DOMAIN_KERNEL,
4414 },
4415- [MT_ROM] = {
4416- .prot_sect = PMD_TYPE_SECT,
4417+ [MT_MEMORY_RX] = {
4418+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4419+ .prot_l1 = PMD_TYPE_TABLE,
4420+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4421+ .domain = DOMAIN_KERNEL,
4422+ },
4423+ [MT_ROM_RX] = {
4424+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4425 .domain = DOMAIN_KERNEL,
4426 },
4427- [MT_MEMORY_RWX_NONCACHED] = {
4428+ [MT_MEMORY_RW_NONCACHED] = {
4429 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4430 L_PTE_MT_BUFFERABLE,
4431 .prot_l1 = PMD_TYPE_TABLE,
4432 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4433 .domain = DOMAIN_KERNEL,
4434 },
4435+ [MT_MEMORY_RX_NONCACHED] = {
4436+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4437+ L_PTE_MT_BUFFERABLE,
4438+ .prot_l1 = PMD_TYPE_TABLE,
4439+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4440+ .domain = DOMAIN_KERNEL,
4441+ },
4442 [MT_MEMORY_RW_DTCM] = {
4443 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4444 L_PTE_XN,
4445@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4446 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4447 .domain = DOMAIN_KERNEL,
4448 },
4449- [MT_MEMORY_RWX_ITCM] = {
4450- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4451+ [MT_MEMORY_RX_ITCM] = {
4452+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4453 .prot_l1 = PMD_TYPE_TABLE,
4454+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4455 .domain = DOMAIN_KERNEL,
4456 },
4457 [MT_MEMORY_RW_SO] = {
4458@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4459 * Mark cache clean areas and XIP ROM read only
4460 * from SVC mode and no access from userspace.
4461 */
4462- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4463- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4464- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4465+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4466+#ifdef CONFIG_PAX_KERNEXEC
4467+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4468+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4469+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4470+#endif
4471+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4472+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4473 #endif
4474
4475 /*
4476@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4477 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4478 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4479 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4480- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4481- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4482+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4483+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4484 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4485 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4486+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4487+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4488 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4489- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4490- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4491+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4492+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4493+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4494+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4495 }
4496 }
4497
4498@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4499 if (cpu_arch >= CPU_ARCH_ARMv6) {
4500 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4501 /* Non-cacheable Normal is XCB = 001 */
4502- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4503+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4504+ PMD_SECT_BUFFERED;
4505+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4506 PMD_SECT_BUFFERED;
4507 } else {
4508 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4509- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4510+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4511+ PMD_SECT_TEX(1);
4512+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4513 PMD_SECT_TEX(1);
4514 }
4515 } else {
4516- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4517+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4518+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4519 }
4520
4521 #ifdef CONFIG_ARM_LPAE
4522@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4523 user_pgprot |= PTE_EXT_PXN;
4524 #endif
4525
4526+ user_pgprot |= __supported_pte_mask;
4527+
4528 for (i = 0; i < 16; i++) {
4529 pteval_t v = pgprot_val(protection_map[i]);
4530 protection_map[i] = __pgprot(v | user_pgprot);
4531@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4532
4533 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4534 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4535- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4536- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4537+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4538+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4539 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4540 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4541+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4542+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4543 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4544- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4545- mem_types[MT_ROM].prot_sect |= cp->pmd;
4546+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4547+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4548+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4549
4550 switch (cp->pmd) {
4551 case PMD_SECT_WT:
4552- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4553+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4554 break;
4555 case PMD_SECT_WB:
4556 case PMD_SECT_WBWA:
4557- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4558+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4559 break;
4560 }
4561 pr_info("Memory policy: %sData cache %s\n",
4562@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4563 return;
4564 }
4565
4566- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4567+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4568 md->virtual >= PAGE_OFFSET &&
4569 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4570 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4571@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4572 * called function. This means you can't use any function or debugging
4573 * method which may touch any device, otherwise the kernel _will_ crash.
4574 */
4575+
4576+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4577+
4578 static void __init devicemaps_init(const struct machine_desc *mdesc)
4579 {
4580 struct map_desc map;
4581 unsigned long addr;
4582- void *vectors;
4583
4584- /*
4585- * Allocate the vector page early.
4586- */
4587- vectors = early_alloc(PAGE_SIZE * 2);
4588-
4589- early_trap_init(vectors);
4590+ early_trap_init(&vectors);
4591
4592 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4593 pmd_clear(pmd_off_k(addr));
4594@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4595 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4596 map.virtual = MODULES_VADDR;
4597 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4598- map.type = MT_ROM;
4599+ map.type = MT_ROM_RX;
4600 create_mapping(&map);
4601 #endif
4602
4603@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4604 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4605 map.virtual = FLUSH_BASE;
4606 map.length = SZ_1M;
4607- map.type = MT_CACHECLEAN;
4608+ map.type = MT_CACHECLEAN_RO;
4609 create_mapping(&map);
4610 #endif
4611 #ifdef FLUSH_BASE_MINICACHE
4612 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4613 map.virtual = FLUSH_BASE_MINICACHE;
4614 map.length = SZ_1M;
4615- map.type = MT_MINICLEAN;
4616+ map.type = MT_MINICLEAN_RO;
4617 create_mapping(&map);
4618 #endif
4619
4620@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4621 * location (0xffff0000). If we aren't using high-vectors, also
4622 * create a mapping at the low-vectors virtual address.
4623 */
4624- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4625+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4626 map.virtual = 0xffff0000;
4627 map.length = PAGE_SIZE;
4628 #ifdef CONFIG_KUSER_HELPERS
4629@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4630 static void __init map_lowmem(void)
4631 {
4632 struct memblock_region *reg;
4633+#ifndef CONFIG_PAX_KERNEXEC
4634 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4635 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4636+#endif
4637
4638 /* Map all the lowmem memory banks. */
4639 for_each_memblock(memory, reg) {
4640@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4641 if (start >= end)
4642 break;
4643
4644+#ifdef CONFIG_PAX_KERNEXEC
4645+ map.pfn = __phys_to_pfn(start);
4646+ map.virtual = __phys_to_virt(start);
4647+ map.length = end - start;
4648+
4649+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4650+ struct map_desc kernel;
4651+ struct map_desc initmap;
4652+
4653+ /* when freeing initmem we will make this RW */
4654+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4655+ initmap.virtual = (unsigned long)__init_begin;
4656+ initmap.length = _sdata - __init_begin;
4657+ initmap.type = __MT_MEMORY_RWX;
4658+ create_mapping(&initmap);
4659+
4660+ /* when freeing initmem we will make this RX */
4661+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4662+ kernel.virtual = (unsigned long)_stext;
4663+ kernel.length = __init_begin - _stext;
4664+ kernel.type = __MT_MEMORY_RWX;
4665+ create_mapping(&kernel);
4666+
4667+ if (map.virtual < (unsigned long)_stext) {
4668+ map.length = (unsigned long)_stext - map.virtual;
4669+ map.type = __MT_MEMORY_RWX;
4670+ create_mapping(&map);
4671+ }
4672+
4673+ map.pfn = __phys_to_pfn(__pa(_sdata));
4674+ map.virtual = (unsigned long)_sdata;
4675+ map.length = end - __pa(_sdata);
4676+ }
4677+
4678+ map.type = MT_MEMORY_RW;
4679+ create_mapping(&map);
4680+#else
4681 if (end < kernel_x_start) {
4682 map.pfn = __phys_to_pfn(start);
4683 map.virtual = __phys_to_virt(start);
4684 map.length = end - start;
4685- map.type = MT_MEMORY_RWX;
4686+ map.type = __MT_MEMORY_RWX;
4687
4688 create_mapping(&map);
4689 } else if (start >= kernel_x_end) {
4690@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4691 map.pfn = __phys_to_pfn(kernel_x_start);
4692 map.virtual = __phys_to_virt(kernel_x_start);
4693 map.length = kernel_x_end - kernel_x_start;
4694- map.type = MT_MEMORY_RWX;
4695+ map.type = __MT_MEMORY_RWX;
4696
4697 create_mapping(&map);
4698
4699@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4700 create_mapping(&map);
4701 }
4702 }
4703+#endif
4704 }
4705 }
4706
4707diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4708index f412b53..fc89433 100644
4709--- a/arch/arm/net/bpf_jit_32.c
4710+++ b/arch/arm/net/bpf_jit_32.c
4711@@ -20,6 +20,7 @@
4712 #include <asm/cacheflush.h>
4713 #include <asm/hwcap.h>
4714 #include <asm/opcodes.h>
4715+#include <asm/pgtable.h>
4716
4717 #include "bpf_jit_32.h"
4718
4719@@ -71,7 +72,11 @@ struct jit_ctx {
4720 #endif
4721 };
4722
4723+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4724+int bpf_jit_enable __read_only;
4725+#else
4726 int bpf_jit_enable __read_mostly;
4727+#endif
4728
4729 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4730 {
4731@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4732 {
4733 u32 *ptr;
4734 /* We are guaranteed to have aligned memory. */
4735+ pax_open_kernel();
4736 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4737 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4738+ pax_close_kernel();
4739 }
4740
4741 static void build_prologue(struct jit_ctx *ctx)
4742diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4743index 5b217f4..c23f40e 100644
4744--- a/arch/arm/plat-iop/setup.c
4745+++ b/arch/arm/plat-iop/setup.c
4746@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4747 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4748 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4749 .length = IOP3XX_PERIPHERAL_SIZE,
4750- .type = MT_UNCACHED,
4751+ .type = MT_UNCACHED_RW,
4752 },
4753 };
4754
4755diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4756index a5bc92d..0bb4730 100644
4757--- a/arch/arm/plat-omap/sram.c
4758+++ b/arch/arm/plat-omap/sram.c
4759@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4760 * Looks like we need to preserve some bootloader code at the
4761 * beginning of SRAM for jumping to flash for reboot to work...
4762 */
4763+ pax_open_kernel();
4764 memset_io(omap_sram_base + omap_sram_skip, 0,
4765 omap_sram_size - omap_sram_skip);
4766+ pax_close_kernel();
4767 }
4768diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4769index 7047051..44e8675 100644
4770--- a/arch/arm64/include/asm/atomic.h
4771+++ b/arch/arm64/include/asm/atomic.h
4772@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4773 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4774 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4775
4776+#define atomic64_read_unchecked(v) atomic64_read(v)
4777+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4778+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4779+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4780+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4781+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4782+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4783+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4784+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4785+
4786 #endif
4787 #endif
4788diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4789index a5abb00..9cbca9a 100644
4790--- a/arch/arm64/include/asm/barrier.h
4791+++ b/arch/arm64/include/asm/barrier.h
4792@@ -44,7 +44,7 @@
4793 do { \
4794 compiletime_assert_atomic_type(*p); \
4795 barrier(); \
4796- ACCESS_ONCE(*p) = (v); \
4797+ ACCESS_ONCE_RW(*p) = (v); \
4798 } while (0)
4799
4800 #define smp_load_acquire(p) \
4801diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4802index 4fde8c1..441f84f 100644
4803--- a/arch/arm64/include/asm/percpu.h
4804+++ b/arch/arm64/include/asm/percpu.h
4805@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4806 {
4807 switch (size) {
4808 case 1:
4809- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4810+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4811 break;
4812 case 2:
4813- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4814+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4815 break;
4816 case 4:
4817- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4818+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4819 break;
4820 case 8:
4821- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4822+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4823 break;
4824 default:
4825 BUILD_BUG();
4826diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4827index e20df38..027ede3 100644
4828--- a/arch/arm64/include/asm/pgalloc.h
4829+++ b/arch/arm64/include/asm/pgalloc.h
4830@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4831 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4832 }
4833
4834+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4835+{
4836+ pud_populate(mm, pud, pmd);
4837+}
4838+
4839 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4840
4841 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4842diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4843index 07e1ba44..ec8cbbb 100644
4844--- a/arch/arm64/include/asm/uaccess.h
4845+++ b/arch/arm64/include/asm/uaccess.h
4846@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4847 flag; \
4848 })
4849
4850+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4851 #define access_ok(type, addr, size) __range_ok(addr, size)
4852 #define user_addr_max get_fs
4853
4854diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4855index b0bd4e5..54e82f6 100644
4856--- a/arch/arm64/mm/dma-mapping.c
4857+++ b/arch/arm64/mm/dma-mapping.c
4858@@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4859 phys_to_page(paddr),
4860 size >> PAGE_SHIFT);
4861 if (!freed)
4862- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4863+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4864 }
4865
4866 static void *__dma_alloc(struct device *dev, size_t size,
4867diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4868index c3a58a1..78fbf54 100644
4869--- a/arch/avr32/include/asm/cache.h
4870+++ b/arch/avr32/include/asm/cache.h
4871@@ -1,8 +1,10 @@
4872 #ifndef __ASM_AVR32_CACHE_H
4873 #define __ASM_AVR32_CACHE_H
4874
4875+#include <linux/const.h>
4876+
4877 #define L1_CACHE_SHIFT 5
4878-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4879+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4880
4881 /*
4882 * Memory returned by kmalloc() may be used for DMA, so we must make
4883diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4884index d232888..87c8df1 100644
4885--- a/arch/avr32/include/asm/elf.h
4886+++ b/arch/avr32/include/asm/elf.h
4887@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4888 the loader. We need to make sure that it is out of the way of the program
4889 that it will "exec", and that there is sufficient room for the brk. */
4890
4891-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4892+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4893
4894+#ifdef CONFIG_PAX_ASLR
4895+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4896+
4897+#define PAX_DELTA_MMAP_LEN 15
4898+#define PAX_DELTA_STACK_LEN 15
4899+#endif
4900
4901 /* This yields a mask that user programs can use to figure out what
4902 instruction set this CPU supports. This could be done in user space,
4903diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4904index 479330b..53717a8 100644
4905--- a/arch/avr32/include/asm/kmap_types.h
4906+++ b/arch/avr32/include/asm/kmap_types.h
4907@@ -2,9 +2,9 @@
4908 #define __ASM_AVR32_KMAP_TYPES_H
4909
4910 #ifdef CONFIG_DEBUG_HIGHMEM
4911-# define KM_TYPE_NR 29
4912+# define KM_TYPE_NR 30
4913 #else
4914-# define KM_TYPE_NR 14
4915+# define KM_TYPE_NR 15
4916 #endif
4917
4918 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4919diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4920index d223a8b..69c5210 100644
4921--- a/arch/avr32/mm/fault.c
4922+++ b/arch/avr32/mm/fault.c
4923@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4924
4925 int exception_trace = 1;
4926
4927+#ifdef CONFIG_PAX_PAGEEXEC
4928+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4929+{
4930+ unsigned long i;
4931+
4932+ printk(KERN_ERR "PAX: bytes at PC: ");
4933+ for (i = 0; i < 20; i++) {
4934+ unsigned char c;
4935+ if (get_user(c, (unsigned char *)pc+i))
4936+ printk(KERN_CONT "???????? ");
4937+ else
4938+ printk(KERN_CONT "%02x ", c);
4939+ }
4940+ printk("\n");
4941+}
4942+#endif
4943+
4944 /*
4945 * This routine handles page faults. It determines the address and the
4946 * problem, and then passes it off to one of the appropriate routines.
4947@@ -178,6 +195,16 @@ bad_area:
4948 up_read(&mm->mmap_sem);
4949
4950 if (user_mode(regs)) {
4951+
4952+#ifdef CONFIG_PAX_PAGEEXEC
4953+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4954+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4955+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4956+ do_group_exit(SIGKILL);
4957+ }
4958+ }
4959+#endif
4960+
4961 if (exception_trace && printk_ratelimit())
4962 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4963 "sp %08lx ecr %lu\n",
4964diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4965index 568885a..f8008df 100644
4966--- a/arch/blackfin/include/asm/cache.h
4967+++ b/arch/blackfin/include/asm/cache.h
4968@@ -7,6 +7,7 @@
4969 #ifndef __ARCH_BLACKFIN_CACHE_H
4970 #define __ARCH_BLACKFIN_CACHE_H
4971
4972+#include <linux/const.h>
4973 #include <linux/linkage.h> /* for asmlinkage */
4974
4975 /*
4976@@ -14,7 +15,7 @@
4977 * Blackfin loads 32 bytes for cache
4978 */
4979 #define L1_CACHE_SHIFT 5
4980-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4981+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4982 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4983
4984 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4985diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4986index aea2718..3639a60 100644
4987--- a/arch/cris/include/arch-v10/arch/cache.h
4988+++ b/arch/cris/include/arch-v10/arch/cache.h
4989@@ -1,8 +1,9 @@
4990 #ifndef _ASM_ARCH_CACHE_H
4991 #define _ASM_ARCH_CACHE_H
4992
4993+#include <linux/const.h>
4994 /* Etrax 100LX have 32-byte cache-lines. */
4995-#define L1_CACHE_BYTES 32
4996 #define L1_CACHE_SHIFT 5
4997+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4998
4999 #endif /* _ASM_ARCH_CACHE_H */
5000diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5001index 7caf25d..ee65ac5 100644
5002--- a/arch/cris/include/arch-v32/arch/cache.h
5003+++ b/arch/cris/include/arch-v32/arch/cache.h
5004@@ -1,11 +1,12 @@
5005 #ifndef _ASM_CRIS_ARCH_CACHE_H
5006 #define _ASM_CRIS_ARCH_CACHE_H
5007
5008+#include <linux/const.h>
5009 #include <arch/hwregs/dma.h>
5010
5011 /* A cache-line is 32 bytes. */
5012-#define L1_CACHE_BYTES 32
5013 #define L1_CACHE_SHIFT 5
5014+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5015
5016 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5017
5018diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5019index 102190a..5334cea 100644
5020--- a/arch/frv/include/asm/atomic.h
5021+++ b/arch/frv/include/asm/atomic.h
5022@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5023 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5024 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5025
5026+#define atomic64_read_unchecked(v) atomic64_read(v)
5027+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5028+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5029+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5030+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5031+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5032+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5033+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5034+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5035+
5036 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5037 {
5038 int c, old;
5039diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5040index 2797163..c2a401df9 100644
5041--- a/arch/frv/include/asm/cache.h
5042+++ b/arch/frv/include/asm/cache.h
5043@@ -12,10 +12,11 @@
5044 #ifndef __ASM_CACHE_H
5045 #define __ASM_CACHE_H
5046
5047+#include <linux/const.h>
5048
5049 /* bytes per L1 cache line */
5050 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5051-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5052+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5053
5054 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5055 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5056diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5057index 43901f2..0d8b865 100644
5058--- a/arch/frv/include/asm/kmap_types.h
5059+++ b/arch/frv/include/asm/kmap_types.h
5060@@ -2,6 +2,6 @@
5061 #ifndef _ASM_KMAP_TYPES_H
5062 #define _ASM_KMAP_TYPES_H
5063
5064-#define KM_TYPE_NR 17
5065+#define KM_TYPE_NR 18
5066
5067 #endif
5068diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5069index 836f147..4cf23f5 100644
5070--- a/arch/frv/mm/elf-fdpic.c
5071+++ b/arch/frv/mm/elf-fdpic.c
5072@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5073 {
5074 struct vm_area_struct *vma;
5075 struct vm_unmapped_area_info info;
5076+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5077
5078 if (len > TASK_SIZE)
5079 return -ENOMEM;
5080@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5081 if (addr) {
5082 addr = PAGE_ALIGN(addr);
5083 vma = find_vma(current->mm, addr);
5084- if (TASK_SIZE - len >= addr &&
5085- (!vma || addr + len <= vma->vm_start))
5086+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5087 goto success;
5088 }
5089
5090@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5091 info.high_limit = (current->mm->start_stack - 0x00200000);
5092 info.align_mask = 0;
5093 info.align_offset = 0;
5094+ info.threadstack_offset = offset;
5095 addr = vm_unmapped_area(&info);
5096 if (!(addr & ~PAGE_MASK))
5097 goto success;
5098diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5099index 69952c18..4fa2908 100644
5100--- a/arch/hexagon/include/asm/cache.h
5101+++ b/arch/hexagon/include/asm/cache.h
5102@@ -21,9 +21,11 @@
5103 #ifndef __ASM_CACHE_H
5104 #define __ASM_CACHE_H
5105
5106+#include <linux/const.h>
5107+
5108 /* Bytes per L1 cache line */
5109-#define L1_CACHE_SHIFT (5)
5110-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5111+#define L1_CACHE_SHIFT 5
5112+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5113
5114 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5115
5116diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5117index 074e52b..76afdac 100644
5118--- a/arch/ia64/Kconfig
5119+++ b/arch/ia64/Kconfig
5120@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5121 config KEXEC
5122 bool "kexec system call"
5123 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5124+ depends on !GRKERNSEC_KMEM
5125 help
5126 kexec is a system call that implements the ability to shutdown your
5127 current kernel, and to start another kernel. It is like a reboot
5128diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5129index 970d0bd..e750b9b 100644
5130--- a/arch/ia64/Makefile
5131+++ b/arch/ia64/Makefile
5132@@ -98,5 +98,6 @@ endef
5133 archprepare: make_nr_irqs_h FORCE
5134 PHONY += make_nr_irqs_h FORCE
5135
5136+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5137 make_nr_irqs_h: FORCE
5138 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5139diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5140index 0bf0350..2ad1957 100644
5141--- a/arch/ia64/include/asm/atomic.h
5142+++ b/arch/ia64/include/asm/atomic.h
5143@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5144 #define atomic64_inc(v) atomic64_add(1, (v))
5145 #define atomic64_dec(v) atomic64_sub(1, (v))
5146
5147+#define atomic64_read_unchecked(v) atomic64_read(v)
5148+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5149+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5150+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5151+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5152+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5153+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5154+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5155+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5156+
5157 #endif /* _ASM_IA64_ATOMIC_H */
5158diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5159index f6769eb..1cdb590 100644
5160--- a/arch/ia64/include/asm/barrier.h
5161+++ b/arch/ia64/include/asm/barrier.h
5162@@ -66,7 +66,7 @@
5163 do { \
5164 compiletime_assert_atomic_type(*p); \
5165 barrier(); \
5166- ACCESS_ONCE(*p) = (v); \
5167+ ACCESS_ONCE_RW(*p) = (v); \
5168 } while (0)
5169
5170 #define smp_load_acquire(p) \
5171diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5172index 988254a..e1ee885 100644
5173--- a/arch/ia64/include/asm/cache.h
5174+++ b/arch/ia64/include/asm/cache.h
5175@@ -1,6 +1,7 @@
5176 #ifndef _ASM_IA64_CACHE_H
5177 #define _ASM_IA64_CACHE_H
5178
5179+#include <linux/const.h>
5180
5181 /*
5182 * Copyright (C) 1998-2000 Hewlett-Packard Co
5183@@ -9,7 +10,7 @@
5184
5185 /* Bytes per L1 (data) cache line. */
5186 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5187-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5188+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5189
5190 #ifdef CONFIG_SMP
5191 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5192diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5193index 5a83c5c..4d7f553 100644
5194--- a/arch/ia64/include/asm/elf.h
5195+++ b/arch/ia64/include/asm/elf.h
5196@@ -42,6 +42,13 @@
5197 */
5198 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5199
5200+#ifdef CONFIG_PAX_ASLR
5201+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5202+
5203+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5204+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5205+#endif
5206+
5207 #define PT_IA_64_UNWIND 0x70000001
5208
5209 /* IA-64 relocations: */
5210diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5211index 5767cdf..7462574 100644
5212--- a/arch/ia64/include/asm/pgalloc.h
5213+++ b/arch/ia64/include/asm/pgalloc.h
5214@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5215 pgd_val(*pgd_entry) = __pa(pud);
5216 }
5217
5218+static inline void
5219+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5220+{
5221+ pgd_populate(mm, pgd_entry, pud);
5222+}
5223+
5224 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5225 {
5226 return quicklist_alloc(0, GFP_KERNEL, NULL);
5227@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5228 pud_val(*pud_entry) = __pa(pmd);
5229 }
5230
5231+static inline void
5232+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5233+{
5234+ pud_populate(mm, pud_entry, pmd);
5235+}
5236+
5237 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5238 {
5239 return quicklist_alloc(0, GFP_KERNEL, NULL);
5240diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5241index 7b6f880..ac8e008 100644
5242--- a/arch/ia64/include/asm/pgtable.h
5243+++ b/arch/ia64/include/asm/pgtable.h
5244@@ -12,7 +12,7 @@
5245 * David Mosberger-Tang <davidm@hpl.hp.com>
5246 */
5247
5248-
5249+#include <linux/const.h>
5250 #include <asm/mman.h>
5251 #include <asm/page.h>
5252 #include <asm/processor.h>
5253@@ -139,6 +139,17 @@
5254 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5255 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5256 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5257+
5258+#ifdef CONFIG_PAX_PAGEEXEC
5259+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5260+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5261+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5262+#else
5263+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5264+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5265+# define PAGE_COPY_NOEXEC PAGE_COPY
5266+#endif
5267+
5268 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5269 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5270 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5271diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5272index 45698cd..e8e2dbc 100644
5273--- a/arch/ia64/include/asm/spinlock.h
5274+++ b/arch/ia64/include/asm/spinlock.h
5275@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5276 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5277
5278 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5279- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5280+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5281 }
5282
5283 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5284diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5285index 4f3fb6cc..254055e 100644
5286--- a/arch/ia64/include/asm/uaccess.h
5287+++ b/arch/ia64/include/asm/uaccess.h
5288@@ -70,6 +70,7 @@
5289 && ((segment).seg == KERNEL_DS.seg \
5290 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5291 })
5292+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5293 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5294
5295 /*
5296@@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5297 static inline unsigned long
5298 __copy_to_user (void __user *to, const void *from, unsigned long count)
5299 {
5300+ if (count > INT_MAX)
5301+ return count;
5302+
5303+ if (!__builtin_constant_p(count))
5304+ check_object_size(from, count, true);
5305+
5306 return __copy_user(to, (__force void __user *) from, count);
5307 }
5308
5309 static inline unsigned long
5310 __copy_from_user (void *to, const void __user *from, unsigned long count)
5311 {
5312+ if (count > INT_MAX)
5313+ return count;
5314+
5315+ if (!__builtin_constant_p(count))
5316+ check_object_size(to, count, false);
5317+
5318 return __copy_user((__force void __user *) to, from, count);
5319 }
5320
5321@@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5322 ({ \
5323 void __user *__cu_to = (to); \
5324 const void *__cu_from = (from); \
5325- long __cu_len = (n); \
5326+ unsigned long __cu_len = (n); \
5327 \
5328- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5329+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5330+ if (!__builtin_constant_p(n)) \
5331+ check_object_size(__cu_from, __cu_len, true); \
5332 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5333+ } \
5334 __cu_len; \
5335 })
5336
5337@@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5338 ({ \
5339 void *__cu_to = (to); \
5340 const void __user *__cu_from = (from); \
5341- long __cu_len = (n); \
5342+ unsigned long __cu_len = (n); \
5343 \
5344 __chk_user_ptr(__cu_from); \
5345- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5346+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5347+ if (!__builtin_constant_p(n)) \
5348+ check_object_size(__cu_to, __cu_len, false); \
5349 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5350+ } \
5351 __cu_len; \
5352 })
5353
5354diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5355index 29754aa..06d2838 100644
5356--- a/arch/ia64/kernel/module.c
5357+++ b/arch/ia64/kernel/module.c
5358@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5359 }
5360
5361 static inline int
5362+in_init_rx (const struct module *mod, uint64_t addr)
5363+{
5364+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5365+}
5366+
5367+static inline int
5368+in_init_rw (const struct module *mod, uint64_t addr)
5369+{
5370+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5371+}
5372+
5373+static inline int
5374 in_init (const struct module *mod, uint64_t addr)
5375 {
5376- return addr - (uint64_t) mod->module_init < mod->init_size;
5377+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5378+}
5379+
5380+static inline int
5381+in_core_rx (const struct module *mod, uint64_t addr)
5382+{
5383+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5384+}
5385+
5386+static inline int
5387+in_core_rw (const struct module *mod, uint64_t addr)
5388+{
5389+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5390 }
5391
5392 static inline int
5393 in_core (const struct module *mod, uint64_t addr)
5394 {
5395- return addr - (uint64_t) mod->module_core < mod->core_size;
5396+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5397 }
5398
5399 static inline int
5400@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5401 break;
5402
5403 case RV_BDREL:
5404- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5405+ if (in_init_rx(mod, val))
5406+ val -= (uint64_t) mod->module_init_rx;
5407+ else if (in_init_rw(mod, val))
5408+ val -= (uint64_t) mod->module_init_rw;
5409+ else if (in_core_rx(mod, val))
5410+ val -= (uint64_t) mod->module_core_rx;
5411+ else if (in_core_rw(mod, val))
5412+ val -= (uint64_t) mod->module_core_rw;
5413 break;
5414
5415 case RV_LTV:
5416@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5417 * addresses have been selected...
5418 */
5419 uint64_t gp;
5420- if (mod->core_size > MAX_LTOFF)
5421+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5422 /*
5423 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5424 * at the end of the module.
5425 */
5426- gp = mod->core_size - MAX_LTOFF / 2;
5427+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5428 else
5429- gp = mod->core_size / 2;
5430- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5431+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5432+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5433 mod->arch.gp = gp;
5434 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5435 }
5436diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5437index c39c3cd..3c77738 100644
5438--- a/arch/ia64/kernel/palinfo.c
5439+++ b/arch/ia64/kernel/palinfo.c
5440@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5441 return NOTIFY_OK;
5442 }
5443
5444-static struct notifier_block __refdata palinfo_cpu_notifier =
5445+static struct notifier_block palinfo_cpu_notifier =
5446 {
5447 .notifier_call = palinfo_cpu_callback,
5448 .priority = 0,
5449diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5450index 41e33f8..65180b2a 100644
5451--- a/arch/ia64/kernel/sys_ia64.c
5452+++ b/arch/ia64/kernel/sys_ia64.c
5453@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5454 unsigned long align_mask = 0;
5455 struct mm_struct *mm = current->mm;
5456 struct vm_unmapped_area_info info;
5457+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5458
5459 if (len > RGN_MAP_LIMIT)
5460 return -ENOMEM;
5461@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5462 if (REGION_NUMBER(addr) == RGN_HPAGE)
5463 addr = 0;
5464 #endif
5465+
5466+#ifdef CONFIG_PAX_RANDMMAP
5467+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5468+ addr = mm->free_area_cache;
5469+ else
5470+#endif
5471+
5472 if (!addr)
5473 addr = TASK_UNMAPPED_BASE;
5474
5475@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5476 info.high_limit = TASK_SIZE;
5477 info.align_mask = align_mask;
5478 info.align_offset = 0;
5479+ info.threadstack_offset = offset;
5480 return vm_unmapped_area(&info);
5481 }
5482
5483diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5484index 84f8a52..7c76178 100644
5485--- a/arch/ia64/kernel/vmlinux.lds.S
5486+++ b/arch/ia64/kernel/vmlinux.lds.S
5487@@ -192,7 +192,7 @@ SECTIONS {
5488 /* Per-cpu data: */
5489 . = ALIGN(PERCPU_PAGE_SIZE);
5490 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5491- __phys_per_cpu_start = __per_cpu_load;
5492+ __phys_per_cpu_start = per_cpu_load;
5493 /*
5494 * ensure percpu data fits
5495 * into percpu page size
5496diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5497index ba5ba7a..36e9d3a 100644
5498--- a/arch/ia64/mm/fault.c
5499+++ b/arch/ia64/mm/fault.c
5500@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5501 return pte_present(pte);
5502 }
5503
5504+#ifdef CONFIG_PAX_PAGEEXEC
5505+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5506+{
5507+ unsigned long i;
5508+
5509+ printk(KERN_ERR "PAX: bytes at PC: ");
5510+ for (i = 0; i < 8; i++) {
5511+ unsigned int c;
5512+ if (get_user(c, (unsigned int *)pc+i))
5513+ printk(KERN_CONT "???????? ");
5514+ else
5515+ printk(KERN_CONT "%08x ", c);
5516+ }
5517+ printk("\n");
5518+}
5519+#endif
5520+
5521 # define VM_READ_BIT 0
5522 # define VM_WRITE_BIT 1
5523 # define VM_EXEC_BIT 2
5524@@ -151,8 +168,21 @@ retry:
5525 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5526 goto bad_area;
5527
5528- if ((vma->vm_flags & mask) != mask)
5529+ if ((vma->vm_flags & mask) != mask) {
5530+
5531+#ifdef CONFIG_PAX_PAGEEXEC
5532+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5533+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5534+ goto bad_area;
5535+
5536+ up_read(&mm->mmap_sem);
5537+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5538+ do_group_exit(SIGKILL);
5539+ }
5540+#endif
5541+
5542 goto bad_area;
5543+ }
5544
5545 /*
5546 * If for any reason at all we couldn't handle the fault, make
5547diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5548index 52b7604b..455cb85 100644
5549--- a/arch/ia64/mm/hugetlbpage.c
5550+++ b/arch/ia64/mm/hugetlbpage.c
5551@@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5552 unsigned long pgoff, unsigned long flags)
5553 {
5554 struct vm_unmapped_area_info info;
5555+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5556
5557 if (len > RGN_MAP_LIMIT)
5558 return -ENOMEM;
5559@@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5560 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5561 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5562 info.align_offset = 0;
5563+ info.threadstack_offset = offset;
5564 return vm_unmapped_area(&info);
5565 }
5566
5567diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5568index 6b33457..88b5124 100644
5569--- a/arch/ia64/mm/init.c
5570+++ b/arch/ia64/mm/init.c
5571@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5572 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5573 vma->vm_end = vma->vm_start + PAGE_SIZE;
5574 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5575+
5576+#ifdef CONFIG_PAX_PAGEEXEC
5577+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5578+ vma->vm_flags &= ~VM_EXEC;
5579+
5580+#ifdef CONFIG_PAX_MPROTECT
5581+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5582+ vma->vm_flags &= ~VM_MAYEXEC;
5583+#endif
5584+
5585+ }
5586+#endif
5587+
5588 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5589 down_write(&current->mm->mmap_sem);
5590 if (insert_vm_struct(current->mm, vma)) {
5591@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5592 gate_vma.vm_start = FIXADDR_USER_START;
5593 gate_vma.vm_end = FIXADDR_USER_END;
5594 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5595- gate_vma.vm_page_prot = __P101;
5596+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5597
5598 return 0;
5599 }
5600diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5601index 40b3ee98..8c2c112 100644
5602--- a/arch/m32r/include/asm/cache.h
5603+++ b/arch/m32r/include/asm/cache.h
5604@@ -1,8 +1,10 @@
5605 #ifndef _ASM_M32R_CACHE_H
5606 #define _ASM_M32R_CACHE_H
5607
5608+#include <linux/const.h>
5609+
5610 /* L1 cache line size */
5611 #define L1_CACHE_SHIFT 4
5612-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5613+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5614
5615 #endif /* _ASM_M32R_CACHE_H */
5616diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5617index 82abd15..d95ae5d 100644
5618--- a/arch/m32r/lib/usercopy.c
5619+++ b/arch/m32r/lib/usercopy.c
5620@@ -14,6 +14,9 @@
5621 unsigned long
5622 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5623 {
5624+ if ((long)n < 0)
5625+ return n;
5626+
5627 prefetch(from);
5628 if (access_ok(VERIFY_WRITE, to, n))
5629 __copy_user(to,from,n);
5630@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5631 unsigned long
5632 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5633 {
5634+ if ((long)n < 0)
5635+ return n;
5636+
5637 prefetchw(to);
5638 if (access_ok(VERIFY_READ, from, n))
5639 __copy_user_zeroing(to,from,n);
5640diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5641index 0395c51..5f26031 100644
5642--- a/arch/m68k/include/asm/cache.h
5643+++ b/arch/m68k/include/asm/cache.h
5644@@ -4,9 +4,11 @@
5645 #ifndef __ARCH_M68K_CACHE_H
5646 #define __ARCH_M68K_CACHE_H
5647
5648+#include <linux/const.h>
5649+
5650 /* bytes per L1 cache line */
5651 #define L1_CACHE_SHIFT 4
5652-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5653+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5654
5655 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5656
5657diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5658index d703d8e..a8e2d70 100644
5659--- a/arch/metag/include/asm/barrier.h
5660+++ b/arch/metag/include/asm/barrier.h
5661@@ -90,7 +90,7 @@ static inline void fence(void)
5662 do { \
5663 compiletime_assert_atomic_type(*p); \
5664 smp_mb(); \
5665- ACCESS_ONCE(*p) = (v); \
5666+ ACCESS_ONCE_RW(*p) = (v); \
5667 } while (0)
5668
5669 #define smp_load_acquire(p) \
5670diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5671index 7ca80ac..794ba72 100644
5672--- a/arch/metag/mm/hugetlbpage.c
5673+++ b/arch/metag/mm/hugetlbpage.c
5674@@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5675 info.high_limit = TASK_SIZE;
5676 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5677 info.align_offset = 0;
5678+ info.threadstack_offset = 0;
5679 return vm_unmapped_area(&info);
5680 }
5681
5682diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5683index 4efe96a..60e8699 100644
5684--- a/arch/microblaze/include/asm/cache.h
5685+++ b/arch/microblaze/include/asm/cache.h
5686@@ -13,11 +13,12 @@
5687 #ifndef _ASM_MICROBLAZE_CACHE_H
5688 #define _ASM_MICROBLAZE_CACHE_H
5689
5690+#include <linux/const.h>
5691 #include <asm/registers.h>
5692
5693 #define L1_CACHE_SHIFT 5
5694 /* word-granular cache in microblaze */
5695-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5696+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5697
5698 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5699
5700diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5701index 1a313c4..f27b613 100644
5702--- a/arch/mips/Kconfig
5703+++ b/arch/mips/Kconfig
5704@@ -2504,6 +2504,7 @@ source "kernel/Kconfig.preempt"
5705
5706 config KEXEC
5707 bool "Kexec system call"
5708+ depends on !GRKERNSEC_KMEM
5709 help
5710 kexec is a system call that implements the ability to shutdown your
5711 current kernel, and to start another kernel. It is like a reboot
5712diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5713index d8960d4..77dbd31 100644
5714--- a/arch/mips/cavium-octeon/dma-octeon.c
5715+++ b/arch/mips/cavium-octeon/dma-octeon.c
5716@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5717 if (dma_release_from_coherent(dev, order, vaddr))
5718 return;
5719
5720- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5721+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5722 }
5723
5724 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5725diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5726index 26d4363..3c9a82e 100644
5727--- a/arch/mips/include/asm/atomic.h
5728+++ b/arch/mips/include/asm/atomic.h
5729@@ -22,15 +22,39 @@
5730 #include <asm/cmpxchg.h>
5731 #include <asm/war.h>
5732
5733+#ifdef CONFIG_GENERIC_ATOMIC64
5734+#include <asm-generic/atomic64.h>
5735+#endif
5736+
5737 #define ATOMIC_INIT(i) { (i) }
5738
5739+#ifdef CONFIG_64BIT
5740+#define _ASM_EXTABLE(from, to) \
5741+" .section __ex_table,\"a\"\n" \
5742+" .dword " #from ", " #to"\n" \
5743+" .previous\n"
5744+#else
5745+#define _ASM_EXTABLE(from, to) \
5746+" .section __ex_table,\"a\"\n" \
5747+" .word " #from ", " #to"\n" \
5748+" .previous\n"
5749+#endif
5750+
5751 /*
5752 * atomic_read - read atomic variable
5753 * @v: pointer of type atomic_t
5754 *
5755 * Atomically reads the value of @v.
5756 */
5757-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5758+static inline int atomic_read(const atomic_t *v)
5759+{
5760+ return ACCESS_ONCE(v->counter);
5761+}
5762+
5763+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5764+{
5765+ return ACCESS_ONCE(v->counter);
5766+}
5767
5768 /*
5769 * atomic_set - set atomic variable
5770@@ -39,47 +63,77 @@
5771 *
5772 * Atomically sets the value of @v to @i.
5773 */
5774-#define atomic_set(v, i) ((v)->counter = (i))
5775+static inline void atomic_set(atomic_t *v, int i)
5776+{
5777+ v->counter = i;
5778+}
5779
5780-#define ATOMIC_OP(op, c_op, asm_op) \
5781-static __inline__ void atomic_##op(int i, atomic_t * v) \
5782+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5783+{
5784+ v->counter = i;
5785+}
5786+
5787+#ifdef CONFIG_PAX_REFCOUNT
5788+#define __OVERFLOW_POST \
5789+ " b 4f \n" \
5790+ " .set noreorder \n" \
5791+ "3: b 5f \n" \
5792+ " move %0, %1 \n" \
5793+ " .set reorder \n"
5794+#define __OVERFLOW_EXTABLE \
5795+ "3:\n" \
5796+ _ASM_EXTABLE(2b, 3b)
5797+#else
5798+#define __OVERFLOW_POST
5799+#define __OVERFLOW_EXTABLE
5800+#endif
5801+
5802+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5803+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5804 { \
5805 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5806 int temp; \
5807 \
5808 __asm__ __volatile__( \
5809- " .set arch=r4000 \n" \
5810- "1: ll %0, %1 # atomic_" #op " \n" \
5811- " " #asm_op " %0, %2 \n" \
5812+ " .set mips3 \n" \
5813+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5814+ "2: " #asm_op " %0, %2 \n" \
5815 " sc %0, %1 \n" \
5816 " beqzl %0, 1b \n" \
5817+ extable \
5818 " .set mips0 \n" \
5819 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5820 : "Ir" (i)); \
5821 } else if (kernel_uses_llsc) { \
5822 int temp; \
5823 \
5824- do { \
5825- __asm__ __volatile__( \
5826- " .set "MIPS_ISA_LEVEL" \n" \
5827- " ll %0, %1 # atomic_" #op "\n" \
5828- " " #asm_op " %0, %2 \n" \
5829- " sc %0, %1 \n" \
5830- " .set mips0 \n" \
5831- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5832- : "Ir" (i)); \
5833- } while (unlikely(!temp)); \
5834+ __asm__ __volatile__( \
5835+ " .set "MIPS_ISA_LEVEL" \n" \
5836+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5837+ "2: " #asm_op " %0, %2 \n" \
5838+ " sc %0, %1 \n" \
5839+ " beqz %0, 1b \n" \
5840+ extable \
5841+ " .set mips0 \n" \
5842+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5843+ : "Ir" (i)); \
5844 } else { \
5845 unsigned long flags; \
5846 \
5847 raw_local_irq_save(flags); \
5848- v->counter c_op i; \
5849+ __asm__ __volatile__( \
5850+ "2: " #asm_op " %0, %1 \n" \
5851+ extable \
5852+ : "+r" (v->counter) : "Ir" (i)); \
5853 raw_local_irq_restore(flags); \
5854 } \
5855 }
5856
5857-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5858-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5859+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5860+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5861+
5862+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5863+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5864 { \
5865 int result; \
5866 \
5867@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5868 int temp; \
5869 \
5870 __asm__ __volatile__( \
5871- " .set arch=r4000 \n" \
5872- "1: ll %1, %2 # atomic_" #op "_return \n" \
5873- " " #asm_op " %0, %1, %3 \n" \
5874+ " .set mips3 \n" \
5875+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5876+ "2: " #asm_op " %0, %1, %3 \n" \
5877 " sc %0, %2 \n" \
5878 " beqzl %0, 1b \n" \
5879- " " #asm_op " %0, %1, %3 \n" \
5880+ post_op \
5881+ extable \
5882+ "4: " #asm_op " %0, %1, %3 \n" \
5883+ "5: \n" \
5884 " .set mips0 \n" \
5885 : "=&r" (result), "=&r" (temp), \
5886 "+" GCC_OFF_SMALL_ASM() (v->counter) \
5887@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5888 } else if (kernel_uses_llsc) { \
5889 int temp; \
5890 \
5891- do { \
5892- __asm__ __volatile__( \
5893- " .set "MIPS_ISA_LEVEL" \n" \
5894- " ll %1, %2 # atomic_" #op "_return \n" \
5895- " " #asm_op " %0, %1, %3 \n" \
5896- " sc %0, %2 \n" \
5897- " .set mips0 \n" \
5898- : "=&r" (result), "=&r" (temp), \
5899- "+" GCC_OFF_SMALL_ASM() (v->counter) \
5900- : "Ir" (i)); \
5901- } while (unlikely(!result)); \
5902+ __asm__ __volatile__( \
5903+ " .set "MIPS_ISA_LEVEL" \n" \
5904+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5905+ "2: " #asm_op " %0, %1, %3 \n" \
5906+ " sc %0, %2 \n" \
5907+ post_op \
5908+ extable \
5909+ "4: " #asm_op " %0, %1, %3 \n" \
5910+ "5: \n" \
5911+ " .set mips0 \n" \
5912+ : "=&r" (result), "=&r" (temp), \
5913+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
5914+ : "Ir" (i)); \
5915 \
5916 result = temp; result c_op i; \
5917 } else { \
5918 unsigned long flags; \
5919 \
5920 raw_local_irq_save(flags); \
5921- result = v->counter; \
5922- result c_op i; \
5923- v->counter = result; \
5924+ __asm__ __volatile__( \
5925+ " lw %0, %1 \n" \
5926+ "2: " #asm_op " %0, %1, %2 \n" \
5927+ " sw %0, %1 \n" \
5928+ "3: \n" \
5929+ extable \
5930+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5931+ : "Ir" (i)); \
5932 raw_local_irq_restore(flags); \
5933 } \
5934 \
5935@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5936 return result; \
5937 }
5938
5939-#define ATOMIC_OPS(op, c_op, asm_op) \
5940- ATOMIC_OP(op, c_op, asm_op) \
5941- ATOMIC_OP_RETURN(op, c_op, asm_op)
5942+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
5943+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5944
5945-ATOMIC_OPS(add, +=, addu)
5946-ATOMIC_OPS(sub, -=, subu)
5947+#define ATOMIC_OPS(op, asm_op) \
5948+ ATOMIC_OP(op, asm_op) \
5949+ ATOMIC_OP_RETURN(op, asm_op)
5950+
5951+ATOMIC_OPS(add, add)
5952+ATOMIC_OPS(sub, sub)
5953
5954 #undef ATOMIC_OPS
5955 #undef ATOMIC_OP_RETURN
5956+#undef __ATOMIC_OP_RETURN
5957 #undef ATOMIC_OP
5958+#undef __ATOMIC_OP
5959
5960 /*
5961 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5962@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5963 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5964 * The function returns the old value of @v minus @i.
5965 */
5966-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5967+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5968 {
5969 int result;
5970
5971@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5972 int temp;
5973
5974 __asm__ __volatile__(
5975- " .set arch=r4000 \n"
5976+ " .set "MIPS_ISA_LEVEL" \n"
5977 "1: ll %1, %2 # atomic_sub_if_positive\n"
5978 " subu %0, %1, %3 \n"
5979 " bltz %0, 1f \n"
5980@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5981 return result;
5982 }
5983
5984-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5985-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5986+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5987+{
5988+ return cmpxchg(&v->counter, old, new);
5989+}
5990+
5991+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5992+ int new)
5993+{
5994+ return cmpxchg(&(v->counter), old, new);
5995+}
5996+
5997+static inline int atomic_xchg(atomic_t *v, int new)
5998+{
5999+ return xchg(&v->counter, new);
6000+}
6001+
6002+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6003+{
6004+ return xchg(&(v->counter), new);
6005+}
6006
6007 /**
6008 * __atomic_add_unless - add unless the number is a given value
6009@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6010
6011 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6012 #define atomic_inc_return(v) atomic_add_return(1, (v))
6013+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6014+{
6015+ return atomic_add_return_unchecked(1, v);
6016+}
6017
6018 /*
6019 * atomic_sub_and_test - subtract value from variable and test result
6020@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6021 * other cases.
6022 */
6023 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6024+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6025+{
6026+ return atomic_add_return_unchecked(1, v) == 0;
6027+}
6028
6029 /*
6030 * atomic_dec_and_test - decrement by 1 and test
6031@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6032 * Atomically increments @v by 1.
6033 */
6034 #define atomic_inc(v) atomic_add(1, (v))
6035+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6036+{
6037+ atomic_add_unchecked(1, v);
6038+}
6039
6040 /*
6041 * atomic_dec - decrement and test
6042@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6043 * Atomically decrements @v by 1.
6044 */
6045 #define atomic_dec(v) atomic_sub(1, (v))
6046+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6047+{
6048+ atomic_sub_unchecked(1, v);
6049+}
6050
6051 /*
6052 * atomic_add_negative - add and test if negative
6053@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6054 * @v: pointer of type atomic64_t
6055 *
6056 */
6057-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6058+static inline long atomic64_read(const atomic64_t *v)
6059+{
6060+ return ACCESS_ONCE(v->counter);
6061+}
6062+
6063+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6064+{
6065+ return ACCESS_ONCE(v->counter);
6066+}
6067
6068 /*
6069 * atomic64_set - set atomic variable
6070 * @v: pointer of type atomic64_t
6071 * @i: required value
6072 */
6073-#define atomic64_set(v, i) ((v)->counter = (i))
6074+static inline void atomic64_set(atomic64_t *v, long i)
6075+{
6076+ v->counter = i;
6077+}
6078
6079-#define ATOMIC64_OP(op, c_op, asm_op) \
6080-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6081+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6082+{
6083+ v->counter = i;
6084+}
6085+
6086+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6087+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6088 { \
6089 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6090 long temp; \
6091 \
6092 __asm__ __volatile__( \
6093- " .set arch=r4000 \n" \
6094- "1: lld %0, %1 # atomic64_" #op " \n" \
6095- " " #asm_op " %0, %2 \n" \
6096+ " .set "MIPS_ISA_LEVEL" \n" \
6097+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6098+ "2: " #asm_op " %0, %2 \n" \
6099 " scd %0, %1 \n" \
6100 " beqzl %0, 1b \n" \
6101+ extable \
6102 " .set mips0 \n" \
6103 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6104 : "Ir" (i)); \
6105 } else if (kernel_uses_llsc) { \
6106 long temp; \
6107 \
6108- do { \
6109- __asm__ __volatile__( \
6110- " .set "MIPS_ISA_LEVEL" \n" \
6111- " lld %0, %1 # atomic64_" #op "\n" \
6112- " " #asm_op " %0, %2 \n" \
6113- " scd %0, %1 \n" \
6114- " .set mips0 \n" \
6115- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6116- : "Ir" (i)); \
6117- } while (unlikely(!temp)); \
6118+ __asm__ __volatile__( \
6119+ " .set "MIPS_ISA_LEVEL" \n" \
6120+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6121+ "2: " #asm_op " %0, %2 \n" \
6122+ " scd %0, %1 \n" \
6123+ " beqz %0, 1b \n" \
6124+ extable \
6125+ " .set mips0 \n" \
6126+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6127+ : "Ir" (i)); \
6128 } else { \
6129 unsigned long flags; \
6130 \
6131 raw_local_irq_save(flags); \
6132- v->counter c_op i; \
6133+ __asm__ __volatile__( \
6134+ "2: " #asm_op " %0, %1 \n" \
6135+ extable \
6136+ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6137 raw_local_irq_restore(flags); \
6138 } \
6139 }
6140
6141-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6142-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6143+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6144+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6145+
6146+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6147+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6148 { \
6149 long result; \
6150 \
6151@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6152 long temp; \
6153 \
6154 __asm__ __volatile__( \
6155- " .set arch=r4000 \n" \
6156+ " .set mips3 \n" \
6157 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6158- " " #asm_op " %0, %1, %3 \n" \
6159+ "2: " #asm_op " %0, %1, %3 \n" \
6160 " scd %0, %2 \n" \
6161 " beqzl %0, 1b \n" \
6162- " " #asm_op " %0, %1, %3 \n" \
6163+ post_op \
6164+ extable \
6165+ "4: " #asm_op " %0, %1, %3 \n" \
6166+ "5: \n" \
6167 " .set mips0 \n" \
6168 : "=&r" (result), "=&r" (temp), \
6169 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6170@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6171 } else if (kernel_uses_llsc) { \
6172 long temp; \
6173 \
6174- do { \
6175- __asm__ __volatile__( \
6176- " .set "MIPS_ISA_LEVEL" \n" \
6177- " lld %1, %2 # atomic64_" #op "_return\n" \
6178- " " #asm_op " %0, %1, %3 \n" \
6179- " scd %0, %2 \n" \
6180- " .set mips0 \n" \
6181- : "=&r" (result), "=&r" (temp), \
6182- "=" GCC_OFF_SMALL_ASM() (v->counter) \
6183- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6184- : "memory"); \
6185- } while (unlikely(!result)); \
6186+ __asm__ __volatile__( \
6187+ " .set "MIPS_ISA_LEVEL" \n" \
6188+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6189+ "2: " #asm_op " %0, %1, %3 \n" \
6190+ " scd %0, %2 \n" \
6191+ " beqz %0, 1b \n" \
6192+ post_op \
6193+ extable \
6194+ "4: " #asm_op " %0, %1, %3 \n" \
6195+ "5: \n" \
6196+ " .set mips0 \n" \
6197+ : "=&r" (result), "=&r" (temp), \
6198+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
6199+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6200+ : "memory"); \
6201 \
6202 result = temp; result c_op i; \
6203 } else { \
6204 unsigned long flags; \
6205 \
6206 raw_local_irq_save(flags); \
6207- result = v->counter; \
6208- result c_op i; \
6209- v->counter = result; \
6210+ __asm__ __volatile__( \
6211+ " ld %0, %1 \n" \
6212+ "2: " #asm_op " %0, %1, %2 \n" \
6213+ " sd %0, %1 \n" \
6214+ "3: \n" \
6215+ extable \
6216+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6217+ : "Ir" (i)); \
6218 raw_local_irq_restore(flags); \
6219 } \
6220 \
6221@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6222 return result; \
6223 }
6224
6225-#define ATOMIC64_OPS(op, c_op, asm_op) \
6226- ATOMIC64_OP(op, c_op, asm_op) \
6227- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6228+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6229+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6230
6231-ATOMIC64_OPS(add, +=, daddu)
6232-ATOMIC64_OPS(sub, -=, dsubu)
6233+#define ATOMIC64_OPS(op, asm_op) \
6234+ ATOMIC64_OP(op, asm_op) \
6235+ ATOMIC64_OP_RETURN(op, asm_op)
6236+
6237+ATOMIC64_OPS(add, dadd)
6238+ATOMIC64_OPS(sub, dsub)
6239
6240 #undef ATOMIC64_OPS
6241 #undef ATOMIC64_OP_RETURN
6242+#undef __ATOMIC64_OP_RETURN
6243 #undef ATOMIC64_OP
6244+#undef __ATOMIC64_OP
6245+#undef __OVERFLOW_EXTABLE
6246+#undef __OVERFLOW_POST
6247
6248 /*
6249 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6250@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6251 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6252 * The function returns the old value of @v minus @i.
6253 */
6254-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6255+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6256 {
6257 long result;
6258
6259@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6260 long temp;
6261
6262 __asm__ __volatile__(
6263- " .set arch=r4000 \n"
6264+ " .set "MIPS_ISA_LEVEL" \n"
6265 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6266 " dsubu %0, %1, %3 \n"
6267 " bltz %0, 1f \n"
6268@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6269 return result;
6270 }
6271
6272-#define atomic64_cmpxchg(v, o, n) \
6273- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6274-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6275+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6276+{
6277+ return cmpxchg(&v->counter, old, new);
6278+}
6279+
6280+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6281+ long new)
6282+{
6283+ return cmpxchg(&(v->counter), old, new);
6284+}
6285+
6286+static inline long atomic64_xchg(atomic64_t *v, long new)
6287+{
6288+ return xchg(&v->counter, new);
6289+}
6290+
6291+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6292+{
6293+ return xchg(&(v->counter), new);
6294+}
6295
6296 /**
6297 * atomic64_add_unless - add unless the number is a given value
6298@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6299
6300 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6301 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6302+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6303
6304 /*
6305 * atomic64_sub_and_test - subtract value from variable and test result
6306@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6307 * other cases.
6308 */
6309 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6310+#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
6311
6312 /*
6313 * atomic64_dec_and_test - decrement by 1 and test
6314@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6315 * Atomically increments @v by 1.
6316 */
6317 #define atomic64_inc(v) atomic64_add(1, (v))
6318+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6319
6320 /*
6321 * atomic64_dec - decrement and test
6322@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6323 * Atomically decrements @v by 1.
6324 */
6325 #define atomic64_dec(v) atomic64_sub(1, (v))
6326+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6327
6328 /*
6329 * atomic64_add_negative - add and test if negative
6330diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6331index 2b8bbbc..4556df6 100644
6332--- a/arch/mips/include/asm/barrier.h
6333+++ b/arch/mips/include/asm/barrier.h
6334@@ -133,7 +133,7 @@
6335 do { \
6336 compiletime_assert_atomic_type(*p); \
6337 smp_mb(); \
6338- ACCESS_ONCE(*p) = (v); \
6339+ ACCESS_ONCE_RW(*p) = (v); \
6340 } while (0)
6341
6342 #define smp_load_acquire(p) \
6343diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6344index b4db69f..8f3b093 100644
6345--- a/arch/mips/include/asm/cache.h
6346+++ b/arch/mips/include/asm/cache.h
6347@@ -9,10 +9,11 @@
6348 #ifndef _ASM_CACHE_H
6349 #define _ASM_CACHE_H
6350
6351+#include <linux/const.h>
6352 #include <kmalloc.h>
6353
6354 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6355-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6356+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6357
6358 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6359 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6360diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6361index 694925a..990fa62 100644
6362--- a/arch/mips/include/asm/elf.h
6363+++ b/arch/mips/include/asm/elf.h
6364@@ -410,15 +410,18 @@ extern const char *__elf_platform;
6365 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6366 #endif
6367
6368+#ifdef CONFIG_PAX_ASLR
6369+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6370+
6371+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6372+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6373+#endif
6374+
6375 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6376 struct linux_binprm;
6377 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6378 int uses_interp);
6379
6380-struct mm_struct;
6381-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6382-#define arch_randomize_brk arch_randomize_brk
6383-
6384 struct arch_elf_state {
6385 int fp_abi;
6386 int interp_fp_abi;
6387diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6388index c1f6afa..38cc6e9 100644
6389--- a/arch/mips/include/asm/exec.h
6390+++ b/arch/mips/include/asm/exec.h
6391@@ -12,6 +12,6 @@
6392 #ifndef _ASM_EXEC_H
6393 #define _ASM_EXEC_H
6394
6395-extern unsigned long arch_align_stack(unsigned long sp);
6396+#define arch_align_stack(x) ((x) & ~0xfUL)
6397
6398 #endif /* _ASM_EXEC_H */
6399diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6400index 9e8ef59..1139d6b 100644
6401--- a/arch/mips/include/asm/hw_irq.h
6402+++ b/arch/mips/include/asm/hw_irq.h
6403@@ -10,7 +10,7 @@
6404
6405 #include <linux/atomic.h>
6406
6407-extern atomic_t irq_err_count;
6408+extern atomic_unchecked_t irq_err_count;
6409
6410 /*
6411 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6412diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6413index 8feaed6..1bd8a64 100644
6414--- a/arch/mips/include/asm/local.h
6415+++ b/arch/mips/include/asm/local.h
6416@@ -13,15 +13,25 @@ typedef struct
6417 atomic_long_t a;
6418 } local_t;
6419
6420+typedef struct {
6421+ atomic_long_unchecked_t a;
6422+} local_unchecked_t;
6423+
6424 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6425
6426 #define local_read(l) atomic_long_read(&(l)->a)
6427+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6428 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6429+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6430
6431 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6432+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6433 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6434+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6435 #define local_inc(l) atomic_long_inc(&(l)->a)
6436+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6437 #define local_dec(l) atomic_long_dec(&(l)->a)
6438+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6439
6440 /*
6441 * Same as above, but return the result value
6442@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6443 return result;
6444 }
6445
6446+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6447+{
6448+ unsigned long result;
6449+
6450+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6451+ unsigned long temp;
6452+
6453+ __asm__ __volatile__(
6454+ " .set mips3 \n"
6455+ "1:" __LL "%1, %2 # local_add_return \n"
6456+ " addu %0, %1, %3 \n"
6457+ __SC "%0, %2 \n"
6458+ " beqzl %0, 1b \n"
6459+ " addu %0, %1, %3 \n"
6460+ " .set mips0 \n"
6461+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6462+ : "Ir" (i), "m" (l->a.counter)
6463+ : "memory");
6464+ } else if (kernel_uses_llsc) {
6465+ unsigned long temp;
6466+
6467+ __asm__ __volatile__(
6468+ " .set mips3 \n"
6469+ "1:" __LL "%1, %2 # local_add_return \n"
6470+ " addu %0, %1, %3 \n"
6471+ __SC "%0, %2 \n"
6472+ " beqz %0, 1b \n"
6473+ " addu %0, %1, %3 \n"
6474+ " .set mips0 \n"
6475+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6476+ : "Ir" (i), "m" (l->a.counter)
6477+ : "memory");
6478+ } else {
6479+ unsigned long flags;
6480+
6481+ local_irq_save(flags);
6482+ result = l->a.counter;
6483+ result += i;
6484+ l->a.counter = result;
6485+ local_irq_restore(flags);
6486+ }
6487+
6488+ return result;
6489+}
6490+
6491 static __inline__ long local_sub_return(long i, local_t * l)
6492 {
6493 unsigned long result;
6494@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6495
6496 #define local_cmpxchg(l, o, n) \
6497 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6498+#define local_cmpxchg_unchecked(l, o, n) \
6499+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6500 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6501
6502 /**
6503diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6504index 154b70a..426ae3d 100644
6505--- a/arch/mips/include/asm/page.h
6506+++ b/arch/mips/include/asm/page.h
6507@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6508 #ifdef CONFIG_CPU_MIPS32
6509 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6510 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6511- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6512+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6513 #else
6514 typedef struct { unsigned long long pte; } pte_t;
6515 #define pte_val(x) ((x).pte)
6516diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6517index b336037..5b874cc 100644
6518--- a/arch/mips/include/asm/pgalloc.h
6519+++ b/arch/mips/include/asm/pgalloc.h
6520@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6521 {
6522 set_pud(pud, __pud((unsigned long)pmd));
6523 }
6524+
6525+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6526+{
6527+ pud_populate(mm, pud, pmd);
6528+}
6529 #endif
6530
6531 /*
6532diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6533index f8f809f..b5f3fa4 100644
6534--- a/arch/mips/include/asm/pgtable.h
6535+++ b/arch/mips/include/asm/pgtable.h
6536@@ -20,6 +20,9 @@
6537 #include <asm/io.h>
6538 #include <asm/pgtable-bits.h>
6539
6540+#define ktla_ktva(addr) (addr)
6541+#define ktva_ktla(addr) (addr)
6542+
6543 struct mm_struct;
6544 struct vm_area_struct;
6545
6546diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6547index 55ed660..3dc9422 100644
6548--- a/arch/mips/include/asm/thread_info.h
6549+++ b/arch/mips/include/asm/thread_info.h
6550@@ -102,6 +102,9 @@ static inline struct thread_info *current_thread_info(void)
6551 #define TIF_SECCOMP 4 /* secure computing */
6552 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6553 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6554+/* li takes a 32bit immediate */
6555+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6556+
6557 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6558 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6559 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6560@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
6561 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6562 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6563 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6564+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6565
6566 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6567 _TIF_SYSCALL_AUDIT | \
6568- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6569+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6570+ _TIF_GRSEC_SETXID)
6571
6572 /* work to do in syscall_trace_leave() */
6573 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6574- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6575+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6576
6577 /* work to do on interrupt/exception return */
6578 #define _TIF_WORK_MASK \
6579@@ -152,7 +157,7 @@ static inline struct thread_info *current_thread_info(void)
6580 /* work to do on any return to u-space */
6581 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6582 _TIF_WORK_SYSCALL_EXIT | \
6583- _TIF_SYSCALL_TRACEPOINT)
6584+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6585
6586 /*
6587 * We stash processor id into a COP0 register to retrieve it fast
6588diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6589index bf8b324..cec5705 100644
6590--- a/arch/mips/include/asm/uaccess.h
6591+++ b/arch/mips/include/asm/uaccess.h
6592@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6593 __ok == 0; \
6594 })
6595
6596+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6597 #define access_ok(type, addr, size) \
6598 likely(__access_ok((addr), (size), __access_mask))
6599
6600diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6601index 1188e00..41cf144 100644
6602--- a/arch/mips/kernel/binfmt_elfn32.c
6603+++ b/arch/mips/kernel/binfmt_elfn32.c
6604@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6605 #undef ELF_ET_DYN_BASE
6606 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6607
6608+#ifdef CONFIG_PAX_ASLR
6609+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6610+
6611+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6612+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6613+#endif
6614+
6615 #include <asm/processor.h>
6616 #include <linux/module.h>
6617 #include <linux/elfcore.h>
6618diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6619index 9287678..f870e47 100644
6620--- a/arch/mips/kernel/binfmt_elfo32.c
6621+++ b/arch/mips/kernel/binfmt_elfo32.c
6622@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6623 #undef ELF_ET_DYN_BASE
6624 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6625
6626+#ifdef CONFIG_PAX_ASLR
6627+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6628+
6629+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6630+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6631+#endif
6632+
6633 #include <asm/processor.h>
6634
6635 #include <linux/module.h>
6636diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6637index a74ec3a..4f06f18 100644
6638--- a/arch/mips/kernel/i8259.c
6639+++ b/arch/mips/kernel/i8259.c
6640@@ -202,7 +202,7 @@ spurious_8259A_irq:
6641 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6642 spurious_irq_mask |= irqmask;
6643 }
6644- atomic_inc(&irq_err_count);
6645+ atomic_inc_unchecked(&irq_err_count);
6646 /*
6647 * Theoretically we do not have to handle this IRQ,
6648 * but in Linux this does not cause problems and is
6649diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6650index 44a1f79..2bd6aa3 100644
6651--- a/arch/mips/kernel/irq-gt641xx.c
6652+++ b/arch/mips/kernel/irq-gt641xx.c
6653@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6654 }
6655 }
6656
6657- atomic_inc(&irq_err_count);
6658+ atomic_inc_unchecked(&irq_err_count);
6659 }
6660
6661 void __init gt641xx_irq_init(void)
6662diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6663index d2bfbc2..a8eacd2 100644
6664--- a/arch/mips/kernel/irq.c
6665+++ b/arch/mips/kernel/irq.c
6666@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6667 printk("unexpected IRQ # %d\n", irq);
6668 }
6669
6670-atomic_t irq_err_count;
6671+atomic_unchecked_t irq_err_count;
6672
6673 int arch_show_interrupts(struct seq_file *p, int prec)
6674 {
6675- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6676+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6677 return 0;
6678 }
6679
6680 asmlinkage void spurious_interrupt(void)
6681 {
6682- atomic_inc(&irq_err_count);
6683+ atomic_inc_unchecked(&irq_err_count);
6684 }
6685
6686 void __init init_IRQ(void)
6687@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6688 #endif
6689 }
6690
6691+
6692 #ifdef DEBUG_STACKOVERFLOW
6693+extern void gr_handle_kernel_exploit(void);
6694+
6695 static inline void check_stack_overflow(void)
6696 {
6697 unsigned long sp;
6698@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6699 printk("do_IRQ: stack overflow: %ld\n",
6700 sp - sizeof(struct thread_info));
6701 dump_stack();
6702+ gr_handle_kernel_exploit();
6703 }
6704 }
6705 #else
6706diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6707index 0614717..002fa43 100644
6708--- a/arch/mips/kernel/pm-cps.c
6709+++ b/arch/mips/kernel/pm-cps.c
6710@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6711 nc_core_ready_count = nc_addr;
6712
6713 /* Ensure ready_count is zero-initialised before the assembly runs */
6714- ACCESS_ONCE(*nc_core_ready_count) = 0;
6715+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6716 coupled_barrier(&per_cpu(pm_barrier, core), online);
6717
6718 /* Run the generated entry code */
6719diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6720index bf85cc1..b365c61 100644
6721--- a/arch/mips/kernel/process.c
6722+++ b/arch/mips/kernel/process.c
6723@@ -535,18 +535,6 @@ out:
6724 return pc;
6725 }
6726
6727-/*
6728- * Don't forget that the stack pointer must be aligned on a 8 bytes
6729- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6730- */
6731-unsigned long arch_align_stack(unsigned long sp)
6732-{
6733- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6734- sp -= get_random_int() & ~PAGE_MASK;
6735-
6736- return sp & ALMASK;
6737-}
6738-
6739 static void arch_dump_stack(void *info)
6740 {
6741 struct pt_regs *regs;
6742diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6743index 5104528..950bbdc 100644
6744--- a/arch/mips/kernel/ptrace.c
6745+++ b/arch/mips/kernel/ptrace.c
6746@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6747 return ret;
6748 }
6749
6750+#ifdef CONFIG_GRKERNSEC_SETXID
6751+extern void gr_delayed_cred_worker(void);
6752+#endif
6753+
6754 /*
6755 * Notification of system call entry/exit
6756 * - triggered by current->work.syscall_trace
6757@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6758 tracehook_report_syscall_entry(regs))
6759 ret = -1;
6760
6761+#ifdef CONFIG_GRKERNSEC_SETXID
6762+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6763+ gr_delayed_cred_worker();
6764+#endif
6765+
6766 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6767 trace_sys_enter(regs, regs->regs[2]);
6768
6769diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6770index 07fc524..b9d7f28 100644
6771--- a/arch/mips/kernel/reset.c
6772+++ b/arch/mips/kernel/reset.c
6773@@ -13,6 +13,7 @@
6774 #include <linux/reboot.h>
6775
6776 #include <asm/reboot.h>
6777+#include <asm/bug.h>
6778
6779 /*
6780 * Urgs ... Too many MIPS machines to handle this in a generic way.
6781@@ -29,16 +30,19 @@ void machine_restart(char *command)
6782 {
6783 if (_machine_restart)
6784 _machine_restart(command);
6785+ BUG();
6786 }
6787
6788 void machine_halt(void)
6789 {
6790 if (_machine_halt)
6791 _machine_halt();
6792+ BUG();
6793 }
6794
6795 void machine_power_off(void)
6796 {
6797 if (pm_power_off)
6798 pm_power_off();
6799+ BUG();
6800 }
6801diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6802index 2242bdd..b284048 100644
6803--- a/arch/mips/kernel/sync-r4k.c
6804+++ b/arch/mips/kernel/sync-r4k.c
6805@@ -18,8 +18,8 @@
6806 #include <asm/mipsregs.h>
6807
6808 static atomic_t count_start_flag = ATOMIC_INIT(0);
6809-static atomic_t count_count_start = ATOMIC_INIT(0);
6810-static atomic_t count_count_stop = ATOMIC_INIT(0);
6811+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6812+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6813 static atomic_t count_reference = ATOMIC_INIT(0);
6814
6815 #define COUNTON 100
6816@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6817
6818 for (i = 0; i < NR_LOOPS; i++) {
6819 /* slaves loop on '!= 2' */
6820- while (atomic_read(&count_count_start) != 1)
6821+ while (atomic_read_unchecked(&count_count_start) != 1)
6822 mb();
6823- atomic_set(&count_count_stop, 0);
6824+ atomic_set_unchecked(&count_count_stop, 0);
6825 smp_wmb();
6826
6827 /* this lets the slaves write their count register */
6828- atomic_inc(&count_count_start);
6829+ atomic_inc_unchecked(&count_count_start);
6830
6831 /*
6832 * Everyone initialises count in the last loop:
6833@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6834 /*
6835 * Wait for all slaves to leave the synchronization point:
6836 */
6837- while (atomic_read(&count_count_stop) != 1)
6838+ while (atomic_read_unchecked(&count_count_stop) != 1)
6839 mb();
6840- atomic_set(&count_count_start, 0);
6841+ atomic_set_unchecked(&count_count_start, 0);
6842 smp_wmb();
6843- atomic_inc(&count_count_stop);
6844+ atomic_inc_unchecked(&count_count_stop);
6845 }
6846 /* Arrange for an interrupt in a short while */
6847 write_c0_compare(read_c0_count() + COUNTON);
6848@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6849 initcount = atomic_read(&count_reference);
6850
6851 for (i = 0; i < NR_LOOPS; i++) {
6852- atomic_inc(&count_count_start);
6853- while (atomic_read(&count_count_start) != 2)
6854+ atomic_inc_unchecked(&count_count_start);
6855+ while (atomic_read_unchecked(&count_count_start) != 2)
6856 mb();
6857
6858 /*
6859@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6860 if (i == NR_LOOPS-1)
6861 write_c0_count(initcount);
6862
6863- atomic_inc(&count_count_stop);
6864- while (atomic_read(&count_count_stop) != 2)
6865+ atomic_inc_unchecked(&count_count_stop);
6866+ while (atomic_read_unchecked(&count_count_stop) != 2)
6867 mb();
6868 }
6869 /* Arrange for an interrupt in a short while */
6870diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6871index 33984c0..666a96d 100644
6872--- a/arch/mips/kernel/traps.c
6873+++ b/arch/mips/kernel/traps.c
6874@@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6875 siginfo_t info;
6876
6877 prev_state = exception_enter();
6878- die_if_kernel("Integer overflow", regs);
6879+ if (unlikely(!user_mode(regs))) {
6880+
6881+#ifdef CONFIG_PAX_REFCOUNT
6882+ if (fixup_exception(regs)) {
6883+ pax_report_refcount_overflow(regs);
6884+ exception_exit(prev_state);
6885+ return;
6886+ }
6887+#endif
6888+
6889+ die("Integer overflow", regs);
6890+ }
6891
6892 info.si_code = FPE_INTOVF;
6893 info.si_signo = SIGFPE;
6894diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6895index f5e7dda..47198ec 100644
6896--- a/arch/mips/kvm/mips.c
6897+++ b/arch/mips/kvm/mips.c
6898@@ -816,7 +816,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6899 return r;
6900 }
6901
6902-int kvm_arch_init(void *opaque)
6903+int kvm_arch_init(const void *opaque)
6904 {
6905 if (kvm_mips_callbacks) {
6906 kvm_err("kvm: module already exists\n");
6907diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6908index 7ff8637..6004edb 100644
6909--- a/arch/mips/mm/fault.c
6910+++ b/arch/mips/mm/fault.c
6911@@ -31,6 +31,23 @@
6912
6913 int show_unhandled_signals = 1;
6914
6915+#ifdef CONFIG_PAX_PAGEEXEC
6916+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6917+{
6918+ unsigned long i;
6919+
6920+ printk(KERN_ERR "PAX: bytes at PC: ");
6921+ for (i = 0; i < 5; i++) {
6922+ unsigned int c;
6923+ if (get_user(c, (unsigned int *)pc+i))
6924+ printk(KERN_CONT "???????? ");
6925+ else
6926+ printk(KERN_CONT "%08x ", c);
6927+ }
6928+ printk("\n");
6929+}
6930+#endif
6931+
6932 /*
6933 * This routine handles page faults. It determines the address,
6934 * and the problem, and then passes it off to one of the appropriate
6935@@ -206,6 +223,14 @@ bad_area:
6936 bad_area_nosemaphore:
6937 /* User mode accesses just cause a SIGSEGV */
6938 if (user_mode(regs)) {
6939+
6940+#ifdef CONFIG_PAX_PAGEEXEC
6941+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6942+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6943+ do_group_exit(SIGKILL);
6944+ }
6945+#endif
6946+
6947 tsk->thread.cp0_badvaddr = address;
6948 tsk->thread.error_code = write;
6949 if (show_unhandled_signals &&
6950diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6951index f1baadd..5472dca 100644
6952--- a/arch/mips/mm/mmap.c
6953+++ b/arch/mips/mm/mmap.c
6954@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6955 struct vm_area_struct *vma;
6956 unsigned long addr = addr0;
6957 int do_color_align;
6958+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6959 struct vm_unmapped_area_info info;
6960
6961 if (unlikely(len > TASK_SIZE))
6962@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6963 do_color_align = 1;
6964
6965 /* requesting a specific address */
6966+
6967+#ifdef CONFIG_PAX_RANDMMAP
6968+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6969+#endif
6970+
6971 if (addr) {
6972 if (do_color_align)
6973 addr = COLOUR_ALIGN(addr, pgoff);
6974@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6975 addr = PAGE_ALIGN(addr);
6976
6977 vma = find_vma(mm, addr);
6978- if (TASK_SIZE - len >= addr &&
6979- (!vma || addr + len <= vma->vm_start))
6980+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6981 return addr;
6982 }
6983
6984 info.length = len;
6985 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6986 info.align_offset = pgoff << PAGE_SHIFT;
6987+ info.threadstack_offset = offset;
6988
6989 if (dir == DOWN) {
6990 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6991@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6992 {
6993 unsigned long random_factor = 0UL;
6994
6995+#ifdef CONFIG_PAX_RANDMMAP
6996+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6997+#endif
6998+
6999 if (current->flags & PF_RANDOMIZE) {
7000 random_factor = get_random_int();
7001 random_factor = random_factor << PAGE_SHIFT;
7002@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7003
7004 if (mmap_is_legacy()) {
7005 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7006+
7007+#ifdef CONFIG_PAX_RANDMMAP
7008+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7009+ mm->mmap_base += mm->delta_mmap;
7010+#endif
7011+
7012 mm->get_unmapped_area = arch_get_unmapped_area;
7013 } else {
7014 mm->mmap_base = mmap_base(random_factor);
7015+
7016+#ifdef CONFIG_PAX_RANDMMAP
7017+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7018+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7019+#endif
7020+
7021 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7022 }
7023 }
7024
7025-static inline unsigned long brk_rnd(void)
7026-{
7027- unsigned long rnd = get_random_int();
7028-
7029- rnd = rnd << PAGE_SHIFT;
7030- /* 8MB for 32bit, 256MB for 64bit */
7031- if (TASK_IS_32BIT_ADDR)
7032- rnd = rnd & 0x7ffffful;
7033- else
7034- rnd = rnd & 0xffffffful;
7035-
7036- return rnd;
7037-}
7038-
7039-unsigned long arch_randomize_brk(struct mm_struct *mm)
7040-{
7041- unsigned long base = mm->brk;
7042- unsigned long ret;
7043-
7044- ret = PAGE_ALIGN(base + brk_rnd());
7045-
7046- if (ret < mm->brk)
7047- return mm->brk;
7048-
7049- return ret;
7050-}
7051-
7052 int __virt_addr_valid(const volatile void *kaddr)
7053 {
7054 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7055diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7056index a2358b4..7cead4f 100644
7057--- a/arch/mips/sgi-ip27/ip27-nmi.c
7058+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7059@@ -187,9 +187,9 @@ void
7060 cont_nmi_dump(void)
7061 {
7062 #ifndef REAL_NMI_SIGNAL
7063- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7064+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7065
7066- atomic_inc(&nmied_cpus);
7067+ atomic_inc_unchecked(&nmied_cpus);
7068 #endif
7069 /*
7070 * Only allow 1 cpu to proceed
7071@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7072 udelay(10000);
7073 }
7074 #else
7075- while (atomic_read(&nmied_cpus) != num_online_cpus());
7076+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7077 #endif
7078
7079 /*
7080diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7081index a046b30..6799527 100644
7082--- a/arch/mips/sni/rm200.c
7083+++ b/arch/mips/sni/rm200.c
7084@@ -270,7 +270,7 @@ spurious_8259A_irq:
7085 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7086 spurious_irq_mask |= irqmask;
7087 }
7088- atomic_inc(&irq_err_count);
7089+ atomic_inc_unchecked(&irq_err_count);
7090 /*
7091 * Theoretically we do not have to handle this IRQ,
7092 * but in Linux this does not cause problems and is
7093diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7094index 41e873b..34d33a7 100644
7095--- a/arch/mips/vr41xx/common/icu.c
7096+++ b/arch/mips/vr41xx/common/icu.c
7097@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7098
7099 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7100
7101- atomic_inc(&irq_err_count);
7102+ atomic_inc_unchecked(&irq_err_count);
7103
7104 return -1;
7105 }
7106diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7107index ae0e4ee..e8f0692 100644
7108--- a/arch/mips/vr41xx/common/irq.c
7109+++ b/arch/mips/vr41xx/common/irq.c
7110@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7111 irq_cascade_t *cascade;
7112
7113 if (irq >= NR_IRQS) {
7114- atomic_inc(&irq_err_count);
7115+ atomic_inc_unchecked(&irq_err_count);
7116 return;
7117 }
7118
7119@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7120 ret = cascade->get_irq(irq);
7121 irq = ret;
7122 if (ret < 0)
7123- atomic_inc(&irq_err_count);
7124+ atomic_inc_unchecked(&irq_err_count);
7125 else
7126 irq_dispatch(irq);
7127 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7128diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7129index 967d144..db12197 100644
7130--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7131+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7132@@ -11,12 +11,14 @@
7133 #ifndef _ASM_PROC_CACHE_H
7134 #define _ASM_PROC_CACHE_H
7135
7136+#include <linux/const.h>
7137+
7138 /* L1 cache */
7139
7140 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7141 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7142-#define L1_CACHE_BYTES 16 /* bytes per entry */
7143 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7144+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7145 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7146
7147 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7148diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7149index bcb5df2..84fabd2 100644
7150--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7151+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7152@@ -16,13 +16,15 @@
7153 #ifndef _ASM_PROC_CACHE_H
7154 #define _ASM_PROC_CACHE_H
7155
7156+#include <linux/const.h>
7157+
7158 /*
7159 * L1 cache
7160 */
7161 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7162 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7163-#define L1_CACHE_BYTES 32 /* bytes per entry */
7164 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7165+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7166 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7167
7168 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7169diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7170index 4ce7a01..449202a 100644
7171--- a/arch/openrisc/include/asm/cache.h
7172+++ b/arch/openrisc/include/asm/cache.h
7173@@ -19,11 +19,13 @@
7174 #ifndef __ASM_OPENRISC_CACHE_H
7175 #define __ASM_OPENRISC_CACHE_H
7176
7177+#include <linux/const.h>
7178+
7179 /* FIXME: How can we replace these with values from the CPU...
7180 * they shouldn't be hard-coded!
7181 */
7182
7183-#define L1_CACHE_BYTES 16
7184 #define L1_CACHE_SHIFT 4
7185+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7186
7187 #endif /* __ASM_OPENRISC_CACHE_H */
7188diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7189index 226f8ca..9d9b87d 100644
7190--- a/arch/parisc/include/asm/atomic.h
7191+++ b/arch/parisc/include/asm/atomic.h
7192@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7193 return dec;
7194 }
7195
7196+#define atomic64_read_unchecked(v) atomic64_read(v)
7197+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7198+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7199+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7200+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7201+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7202+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7203+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7204+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7205+
7206 #endif /* !CONFIG_64BIT */
7207
7208
7209diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7210index 47f11c7..3420df2 100644
7211--- a/arch/parisc/include/asm/cache.h
7212+++ b/arch/parisc/include/asm/cache.h
7213@@ -5,6 +5,7 @@
7214 #ifndef __ARCH_PARISC_CACHE_H
7215 #define __ARCH_PARISC_CACHE_H
7216
7217+#include <linux/const.h>
7218
7219 /*
7220 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7221@@ -15,13 +16,13 @@
7222 * just ruin performance.
7223 */
7224 #ifdef CONFIG_PA20
7225-#define L1_CACHE_BYTES 64
7226 #define L1_CACHE_SHIFT 6
7227 #else
7228-#define L1_CACHE_BYTES 32
7229 #define L1_CACHE_SHIFT 5
7230 #endif
7231
7232+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7233+
7234 #ifndef __ASSEMBLY__
7235
7236 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7237diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7238index 3391d06..c23a2cc 100644
7239--- a/arch/parisc/include/asm/elf.h
7240+++ b/arch/parisc/include/asm/elf.h
7241@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7242
7243 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7244
7245+#ifdef CONFIG_PAX_ASLR
7246+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7247+
7248+#define PAX_DELTA_MMAP_LEN 16
7249+#define PAX_DELTA_STACK_LEN 16
7250+#endif
7251+
7252 /* This yields a mask that user programs can use to figure out what
7253 instruction set this CPU supports. This could be done in user space,
7254 but it's not easy, and we've already done it here. */
7255diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7256index d174372..f27fe5c 100644
7257--- a/arch/parisc/include/asm/pgalloc.h
7258+++ b/arch/parisc/include/asm/pgalloc.h
7259@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7260 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7261 }
7262
7263+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7264+{
7265+ pgd_populate(mm, pgd, pmd);
7266+}
7267+
7268 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7269 {
7270 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7271@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7272 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7273 #define pmd_free(mm, x) do { } while (0)
7274 #define pgd_populate(mm, pmd, pte) BUG()
7275+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7276
7277 #endif
7278
7279diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7280index 15207b9..3209e65 100644
7281--- a/arch/parisc/include/asm/pgtable.h
7282+++ b/arch/parisc/include/asm/pgtable.h
7283@@ -215,6 +215,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7284 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7285 #define PAGE_COPY PAGE_EXECREAD
7286 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7287+
7288+#ifdef CONFIG_PAX_PAGEEXEC
7289+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7290+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7291+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7292+#else
7293+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7294+# define PAGE_COPY_NOEXEC PAGE_COPY
7295+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7296+#endif
7297+
7298 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7299 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7300 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7301diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7302index 0abdd4c..1af92f0 100644
7303--- a/arch/parisc/include/asm/uaccess.h
7304+++ b/arch/parisc/include/asm/uaccess.h
7305@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7306 const void __user *from,
7307 unsigned long n)
7308 {
7309- int sz = __compiletime_object_size(to);
7310+ size_t sz = __compiletime_object_size(to);
7311 int ret = -EFAULT;
7312
7313- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7314+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7315 ret = __copy_from_user(to, from, n);
7316 else
7317 copy_from_user_overflow();
7318diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7319index 3c63a82..b1d6ee9 100644
7320--- a/arch/parisc/kernel/module.c
7321+++ b/arch/parisc/kernel/module.c
7322@@ -98,16 +98,38 @@
7323
7324 /* three functions to determine where in the module core
7325 * or init pieces the location is */
7326+static inline int in_init_rx(struct module *me, void *loc)
7327+{
7328+ return (loc >= me->module_init_rx &&
7329+ loc < (me->module_init_rx + me->init_size_rx));
7330+}
7331+
7332+static inline int in_init_rw(struct module *me, void *loc)
7333+{
7334+ return (loc >= me->module_init_rw &&
7335+ loc < (me->module_init_rw + me->init_size_rw));
7336+}
7337+
7338 static inline int in_init(struct module *me, void *loc)
7339 {
7340- return (loc >= me->module_init &&
7341- loc <= (me->module_init + me->init_size));
7342+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7343+}
7344+
7345+static inline int in_core_rx(struct module *me, void *loc)
7346+{
7347+ return (loc >= me->module_core_rx &&
7348+ loc < (me->module_core_rx + me->core_size_rx));
7349+}
7350+
7351+static inline int in_core_rw(struct module *me, void *loc)
7352+{
7353+ return (loc >= me->module_core_rw &&
7354+ loc < (me->module_core_rw + me->core_size_rw));
7355 }
7356
7357 static inline int in_core(struct module *me, void *loc)
7358 {
7359- return (loc >= me->module_core &&
7360- loc <= (me->module_core + me->core_size));
7361+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7362 }
7363
7364 static inline int in_local(struct module *me, void *loc)
7365@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7366 }
7367
7368 /* align things a bit */
7369- me->core_size = ALIGN(me->core_size, 16);
7370- me->arch.got_offset = me->core_size;
7371- me->core_size += gots * sizeof(struct got_entry);
7372+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7373+ me->arch.got_offset = me->core_size_rw;
7374+ me->core_size_rw += gots * sizeof(struct got_entry);
7375
7376- me->core_size = ALIGN(me->core_size, 16);
7377- me->arch.fdesc_offset = me->core_size;
7378- me->core_size += fdescs * sizeof(Elf_Fdesc);
7379+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7380+ me->arch.fdesc_offset = me->core_size_rw;
7381+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7382
7383 me->arch.got_max = gots;
7384 me->arch.fdesc_max = fdescs;
7385@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7386
7387 BUG_ON(value == 0);
7388
7389- got = me->module_core + me->arch.got_offset;
7390+ got = me->module_core_rw + me->arch.got_offset;
7391 for (i = 0; got[i].addr; i++)
7392 if (got[i].addr == value)
7393 goto out;
7394@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7395 #ifdef CONFIG_64BIT
7396 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7397 {
7398- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7399+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7400
7401 if (!value) {
7402 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7403@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7404
7405 /* Create new one */
7406 fdesc->addr = value;
7407- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7408+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7409 return (Elf_Addr)fdesc;
7410 }
7411 #endif /* CONFIG_64BIT */
7412@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7413
7414 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7415 end = table + sechdrs[me->arch.unwind_section].sh_size;
7416- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7417+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7418
7419 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7420 me->arch.unwind_section, table, end, gp);
7421diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7422index e1ffea2..46ed66e 100644
7423--- a/arch/parisc/kernel/sys_parisc.c
7424+++ b/arch/parisc/kernel/sys_parisc.c
7425@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7426 unsigned long task_size = TASK_SIZE;
7427 int do_color_align, last_mmap;
7428 struct vm_unmapped_area_info info;
7429+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7430
7431 if (len > task_size)
7432 return -ENOMEM;
7433@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7434 goto found_addr;
7435 }
7436
7437+#ifdef CONFIG_PAX_RANDMMAP
7438+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7439+#endif
7440+
7441 if (addr) {
7442 if (do_color_align && last_mmap)
7443 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7444@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7445 info.high_limit = mmap_upper_limit();
7446 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7447 info.align_offset = shared_align_offset(last_mmap, pgoff);
7448+ info.threadstack_offset = offset;
7449 addr = vm_unmapped_area(&info);
7450
7451 found_addr:
7452@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7453 unsigned long addr = addr0;
7454 int do_color_align, last_mmap;
7455 struct vm_unmapped_area_info info;
7456+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7457
7458 #ifdef CONFIG_64BIT
7459 /* This should only ever run for 32-bit processes. */
7460@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7461 }
7462
7463 /* requesting a specific address */
7464+#ifdef CONFIG_PAX_RANDMMAP
7465+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7466+#endif
7467+
7468 if (addr) {
7469 if (do_color_align && last_mmap)
7470 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7471@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7472 info.high_limit = mm->mmap_base;
7473 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7474 info.align_offset = shared_align_offset(last_mmap, pgoff);
7475+ info.threadstack_offset = offset;
7476 addr = vm_unmapped_area(&info);
7477 if (!(addr & ~PAGE_MASK))
7478 goto found_addr;
7479@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7480 mm->mmap_legacy_base = mmap_legacy_base();
7481 mm->mmap_base = mmap_upper_limit();
7482
7483+#ifdef CONFIG_PAX_RANDMMAP
7484+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7485+ mm->mmap_legacy_base += mm->delta_mmap;
7486+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7487+ }
7488+#endif
7489+
7490 if (mmap_is_legacy()) {
7491 mm->mmap_base = mm->mmap_legacy_base;
7492 mm->get_unmapped_area = arch_get_unmapped_area;
7493diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7494index 47ee620..1107387 100644
7495--- a/arch/parisc/kernel/traps.c
7496+++ b/arch/parisc/kernel/traps.c
7497@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7498
7499 down_read(&current->mm->mmap_sem);
7500 vma = find_vma(current->mm,regs->iaoq[0]);
7501- if (vma && (regs->iaoq[0] >= vma->vm_start)
7502- && (vma->vm_flags & VM_EXEC)) {
7503-
7504+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7505 fault_address = regs->iaoq[0];
7506 fault_space = regs->iasq[0];
7507
7508diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7509index e5120e6..8ddb5cc 100644
7510--- a/arch/parisc/mm/fault.c
7511+++ b/arch/parisc/mm/fault.c
7512@@ -15,6 +15,7 @@
7513 #include <linux/sched.h>
7514 #include <linux/interrupt.h>
7515 #include <linux/module.h>
7516+#include <linux/unistd.h>
7517
7518 #include <asm/uaccess.h>
7519 #include <asm/traps.h>
7520@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7521 static unsigned long
7522 parisc_acctyp(unsigned long code, unsigned int inst)
7523 {
7524- if (code == 6 || code == 16)
7525+ if (code == 6 || code == 7 || code == 16)
7526 return VM_EXEC;
7527
7528 switch (inst & 0xf0000000) {
7529@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7530 }
7531 #endif
7532
7533+#ifdef CONFIG_PAX_PAGEEXEC
7534+/*
7535+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7536+ *
7537+ * returns 1 when task should be killed
7538+ * 2 when rt_sigreturn trampoline was detected
7539+ * 3 when unpatched PLT trampoline was detected
7540+ */
7541+static int pax_handle_fetch_fault(struct pt_regs *regs)
7542+{
7543+
7544+#ifdef CONFIG_PAX_EMUPLT
7545+ int err;
7546+
7547+ do { /* PaX: unpatched PLT emulation */
7548+ unsigned int bl, depwi;
7549+
7550+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7551+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7552+
7553+ if (err)
7554+ break;
7555+
7556+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7557+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7558+
7559+ err = get_user(ldw, (unsigned int *)addr);
7560+ err |= get_user(bv, (unsigned int *)(addr+4));
7561+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7562+
7563+ if (err)
7564+ break;
7565+
7566+ if (ldw == 0x0E801096U &&
7567+ bv == 0xEAC0C000U &&
7568+ ldw2 == 0x0E881095U)
7569+ {
7570+ unsigned int resolver, map;
7571+
7572+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7573+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7574+ if (err)
7575+ break;
7576+
7577+ regs->gr[20] = instruction_pointer(regs)+8;
7578+ regs->gr[21] = map;
7579+ regs->gr[22] = resolver;
7580+ regs->iaoq[0] = resolver | 3UL;
7581+ regs->iaoq[1] = regs->iaoq[0] + 4;
7582+ return 3;
7583+ }
7584+ }
7585+ } while (0);
7586+#endif
7587+
7588+#ifdef CONFIG_PAX_EMUTRAMP
7589+
7590+#ifndef CONFIG_PAX_EMUSIGRT
7591+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7592+ return 1;
7593+#endif
7594+
7595+ do { /* PaX: rt_sigreturn emulation */
7596+ unsigned int ldi1, ldi2, bel, nop;
7597+
7598+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7599+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7600+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7601+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7602+
7603+ if (err)
7604+ break;
7605+
7606+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7607+ ldi2 == 0x3414015AU &&
7608+ bel == 0xE4008200U &&
7609+ nop == 0x08000240U)
7610+ {
7611+ regs->gr[25] = (ldi1 & 2) >> 1;
7612+ regs->gr[20] = __NR_rt_sigreturn;
7613+ regs->gr[31] = regs->iaoq[1] + 16;
7614+ regs->sr[0] = regs->iasq[1];
7615+ regs->iaoq[0] = 0x100UL;
7616+ regs->iaoq[1] = regs->iaoq[0] + 4;
7617+ regs->iasq[0] = regs->sr[2];
7618+ regs->iasq[1] = regs->sr[2];
7619+ return 2;
7620+ }
7621+ } while (0);
7622+#endif
7623+
7624+ return 1;
7625+}
7626+
7627+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7628+{
7629+ unsigned long i;
7630+
7631+ printk(KERN_ERR "PAX: bytes at PC: ");
7632+ for (i = 0; i < 5; i++) {
7633+ unsigned int c;
7634+ if (get_user(c, (unsigned int *)pc+i))
7635+ printk(KERN_CONT "???????? ");
7636+ else
7637+ printk(KERN_CONT "%08x ", c);
7638+ }
7639+ printk("\n");
7640+}
7641+#endif
7642+
7643 int fixup_exception(struct pt_regs *regs)
7644 {
7645 const struct exception_table_entry *fix;
7646@@ -234,8 +345,33 @@ retry:
7647
7648 good_area:
7649
7650- if ((vma->vm_flags & acc_type) != acc_type)
7651+ if ((vma->vm_flags & acc_type) != acc_type) {
7652+
7653+#ifdef CONFIG_PAX_PAGEEXEC
7654+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7655+ (address & ~3UL) == instruction_pointer(regs))
7656+ {
7657+ up_read(&mm->mmap_sem);
7658+ switch (pax_handle_fetch_fault(regs)) {
7659+
7660+#ifdef CONFIG_PAX_EMUPLT
7661+ case 3:
7662+ return;
7663+#endif
7664+
7665+#ifdef CONFIG_PAX_EMUTRAMP
7666+ case 2:
7667+ return;
7668+#endif
7669+
7670+ }
7671+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7672+ do_group_exit(SIGKILL);
7673+ }
7674+#endif
7675+
7676 goto bad_area;
7677+ }
7678
7679 /*
7680 * If for any reason at all we couldn't handle the fault, make
7681diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7682index 22b0940..309f790 100644
7683--- a/arch/powerpc/Kconfig
7684+++ b/arch/powerpc/Kconfig
7685@@ -409,6 +409,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7686 config KEXEC
7687 bool "kexec system call"
7688 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7689+ depends on !GRKERNSEC_KMEM
7690 help
7691 kexec is a system call that implements the ability to shutdown your
7692 current kernel, and to start another kernel. It is like a reboot
7693diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7694index 512d278..d31fadd 100644
7695--- a/arch/powerpc/include/asm/atomic.h
7696+++ b/arch/powerpc/include/asm/atomic.h
7697@@ -12,6 +12,11 @@
7698
7699 #define ATOMIC_INIT(i) { (i) }
7700
7701+#define _ASM_EXTABLE(from, to) \
7702+" .section __ex_table,\"a\"\n" \
7703+ PPC_LONG" " #from ", " #to"\n" \
7704+" .previous\n"
7705+
7706 static __inline__ int atomic_read(const atomic_t *v)
7707 {
7708 int t;
7709@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7710 return t;
7711 }
7712
7713+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7714+{
7715+ int t;
7716+
7717+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7718+
7719+ return t;
7720+}
7721+
7722 static __inline__ void atomic_set(atomic_t *v, int i)
7723 {
7724 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7725 }
7726
7727-#define ATOMIC_OP(op, asm_op) \
7728-static __inline__ void atomic_##op(int a, atomic_t *v) \
7729+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7730+{
7731+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7732+}
7733+
7734+#ifdef CONFIG_PAX_REFCOUNT
7735+#define __REFCOUNT_OP(op) op##o.
7736+#define __OVERFLOW_PRE \
7737+ " mcrxr cr0\n"
7738+#define __OVERFLOW_POST \
7739+ " bf 4*cr0+so, 3f\n" \
7740+ "2: .long 0x00c00b00\n" \
7741+ "3:\n"
7742+#define __OVERFLOW_EXTABLE \
7743+ "\n4:\n"
7744+ _ASM_EXTABLE(2b, 4b)
7745+#else
7746+#define __REFCOUNT_OP(op) op
7747+#define __OVERFLOW_PRE
7748+#define __OVERFLOW_POST
7749+#define __OVERFLOW_EXTABLE
7750+#endif
7751+
7752+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7753+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7754 { \
7755 int t; \
7756 \
7757 __asm__ __volatile__( \
7758-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7759+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7760+ pre_op \
7761 #asm_op " %0,%2,%0\n" \
7762+ post_op \
7763 PPC405_ERR77(0,%3) \
7764 " stwcx. %0,0,%3 \n" \
7765 " bne- 1b\n" \
7766+ extable \
7767 : "=&r" (t), "+m" (v->counter) \
7768 : "r" (a), "r" (&v->counter) \
7769 : "cc"); \
7770 } \
7771
7772-#define ATOMIC_OP_RETURN(op, asm_op) \
7773-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7774+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7775+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7776+
7777+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7778+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7779 { \
7780 int t; \
7781 \
7782 __asm__ __volatile__( \
7783 PPC_ATOMIC_ENTRY_BARRIER \
7784-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7785+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7786+ pre_op \
7787 #asm_op " %0,%1,%0\n" \
7788+ post_op \
7789 PPC405_ERR77(0,%2) \
7790 " stwcx. %0,0,%2 \n" \
7791 " bne- 1b\n" \
7792+ extable \
7793 PPC_ATOMIC_EXIT_BARRIER \
7794 : "=&r" (t) \
7795 : "r" (a), "r" (&v->counter) \
7796@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7797 return t; \
7798 }
7799
7800+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7801+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7802+
7803 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7804
7805 ATOMIC_OPS(add, add)
7806@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7807
7808 #undef ATOMIC_OPS
7809 #undef ATOMIC_OP_RETURN
7810+#undef __ATOMIC_OP_RETURN
7811 #undef ATOMIC_OP
7812+#undef __ATOMIC_OP
7813
7814 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7815
7816-static __inline__ void atomic_inc(atomic_t *v)
7817-{
7818- int t;
7819+/*
7820+ * atomic_inc - increment atomic variable
7821+ * @v: pointer of type atomic_t
7822+ *
7823+ * Automatically increments @v by 1
7824+ */
7825+#define atomic_inc(v) atomic_add(1, (v))
7826+#define atomic_inc_return(v) atomic_add_return(1, (v))
7827
7828- __asm__ __volatile__(
7829-"1: lwarx %0,0,%2 # atomic_inc\n\
7830- addic %0,%0,1\n"
7831- PPC405_ERR77(0,%2)
7832-" stwcx. %0,0,%2 \n\
7833- bne- 1b"
7834- : "=&r" (t), "+m" (v->counter)
7835- : "r" (&v->counter)
7836- : "cc", "xer");
7837+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7838+{
7839+ atomic_add_unchecked(1, v);
7840 }
7841
7842-static __inline__ int atomic_inc_return(atomic_t *v)
7843+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7844 {
7845- int t;
7846-
7847- __asm__ __volatile__(
7848- PPC_ATOMIC_ENTRY_BARRIER
7849-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7850- addic %0,%0,1\n"
7851- PPC405_ERR77(0,%1)
7852-" stwcx. %0,0,%1 \n\
7853- bne- 1b"
7854- PPC_ATOMIC_EXIT_BARRIER
7855- : "=&r" (t)
7856- : "r" (&v->counter)
7857- : "cc", "xer", "memory");
7858-
7859- return t;
7860+ return atomic_add_return_unchecked(1, v);
7861 }
7862
7863 /*
7864@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7865 */
7866 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7867
7868-static __inline__ void atomic_dec(atomic_t *v)
7869+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7870 {
7871- int t;
7872-
7873- __asm__ __volatile__(
7874-"1: lwarx %0,0,%2 # atomic_dec\n\
7875- addic %0,%0,-1\n"
7876- PPC405_ERR77(0,%2)\
7877-" stwcx. %0,0,%2\n\
7878- bne- 1b"
7879- : "=&r" (t), "+m" (v->counter)
7880- : "r" (&v->counter)
7881- : "cc", "xer");
7882+ return atomic_add_return_unchecked(1, v) == 0;
7883 }
7884
7885-static __inline__ int atomic_dec_return(atomic_t *v)
7886+/*
7887+ * atomic_dec - decrement atomic variable
7888+ * @v: pointer of type atomic_t
7889+ *
7890+ * Atomically decrements @v by 1
7891+ */
7892+#define atomic_dec(v) atomic_sub(1, (v))
7893+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7894+
7895+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7896 {
7897- int t;
7898-
7899- __asm__ __volatile__(
7900- PPC_ATOMIC_ENTRY_BARRIER
7901-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7902- addic %0,%0,-1\n"
7903- PPC405_ERR77(0,%1)
7904-" stwcx. %0,0,%1\n\
7905- bne- 1b"
7906- PPC_ATOMIC_EXIT_BARRIER
7907- : "=&r" (t)
7908- : "r" (&v->counter)
7909- : "cc", "xer", "memory");
7910-
7911- return t;
7912+ atomic_sub_unchecked(1, v);
7913 }
7914
7915 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7916 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7917
7918+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7919+{
7920+ return cmpxchg(&(v->counter), old, new);
7921+}
7922+
7923+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7924+{
7925+ return xchg(&(v->counter), new);
7926+}
7927+
7928 /**
7929 * __atomic_add_unless - add unless the number is a given value
7930 * @v: pointer of type atomic_t
7931@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7932 PPC_ATOMIC_ENTRY_BARRIER
7933 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7934 cmpw 0,%0,%3 \n\
7935- beq- 2f \n\
7936- add %0,%2,%0 \n"
7937+ beq- 2f \n"
7938+
7939+#ifdef CONFIG_PAX_REFCOUNT
7940+" mcrxr cr0\n"
7941+" addo. %0,%2,%0\n"
7942+" bf 4*cr0+so, 4f\n"
7943+"3:.long " "0x00c00b00""\n"
7944+"4:\n"
7945+#else
7946+ "add %0,%2,%0 \n"
7947+#endif
7948+
7949 PPC405_ERR77(0,%2)
7950 " stwcx. %0,0,%1 \n\
7951 bne- 1b \n"
7952+"5:"
7953+
7954+#ifdef CONFIG_PAX_REFCOUNT
7955+ _ASM_EXTABLE(3b, 5b)
7956+#endif
7957+
7958 PPC_ATOMIC_EXIT_BARRIER
7959 " subf %0,%2,%0 \n\
7960 2:"
7961@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7962 }
7963 #define atomic_dec_if_positive atomic_dec_if_positive
7964
7965+#define smp_mb__before_atomic_dec() smp_mb()
7966+#define smp_mb__after_atomic_dec() smp_mb()
7967+#define smp_mb__before_atomic_inc() smp_mb()
7968+#define smp_mb__after_atomic_inc() smp_mb()
7969+
7970 #ifdef __powerpc64__
7971
7972 #define ATOMIC64_INIT(i) { (i) }
7973@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7974 return t;
7975 }
7976
7977+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7978+{
7979+ long t;
7980+
7981+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7982+
7983+ return t;
7984+}
7985+
7986 static __inline__ void atomic64_set(atomic64_t *v, long i)
7987 {
7988 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7989 }
7990
7991-#define ATOMIC64_OP(op, asm_op) \
7992-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7993+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7994+{
7995+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7996+}
7997+
7998+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7999+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
8000 { \
8001 long t; \
8002 \
8003 __asm__ __volatile__( \
8004 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8005+ pre_op \
8006 #asm_op " %0,%2,%0\n" \
8007+ post_op \
8008 " stdcx. %0,0,%3 \n" \
8009 " bne- 1b\n" \
8010+ extable \
8011 : "=&r" (t), "+m" (v->counter) \
8012 : "r" (a), "r" (&v->counter) \
8013 : "cc"); \
8014 }
8015
8016-#define ATOMIC64_OP_RETURN(op, asm_op) \
8017-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8018+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8019+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8020+
8021+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8022+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8023 { \
8024 long t; \
8025 \
8026 __asm__ __volatile__( \
8027 PPC_ATOMIC_ENTRY_BARRIER \
8028 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8029+ pre_op \
8030 #asm_op " %0,%1,%0\n" \
8031+ post_op \
8032 " stdcx. %0,0,%2 \n" \
8033 " bne- 1b\n" \
8034+ extable \
8035 PPC_ATOMIC_EXIT_BARRIER \
8036 : "=&r" (t) \
8037 : "r" (a), "r" (&v->counter) \
8038@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8039 return t; \
8040 }
8041
8042+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8043+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8044+
8045 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8046
8047 ATOMIC64_OPS(add, add)
8048@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8049
8050 #undef ATOMIC64_OPS
8051 #undef ATOMIC64_OP_RETURN
8052+#undef __ATOMIC64_OP_RETURN
8053 #undef ATOMIC64_OP
8054+#undef __ATOMIC64_OP
8055+#undef __OVERFLOW_EXTABLE
8056+#undef __OVERFLOW_POST
8057+#undef __OVERFLOW_PRE
8058+#undef __REFCOUNT_OP
8059
8060 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8061
8062-static __inline__ void atomic64_inc(atomic64_t *v)
8063-{
8064- long t;
8065+/*
8066+ * atomic64_inc - increment atomic variable
8067+ * @v: pointer of type atomic64_t
8068+ *
8069+ * Automatically increments @v by 1
8070+ */
8071+#define atomic64_inc(v) atomic64_add(1, (v))
8072+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8073
8074- __asm__ __volatile__(
8075-"1: ldarx %0,0,%2 # atomic64_inc\n\
8076- addic %0,%0,1\n\
8077- stdcx. %0,0,%2 \n\
8078- bne- 1b"
8079- : "=&r" (t), "+m" (v->counter)
8080- : "r" (&v->counter)
8081- : "cc", "xer");
8082+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8083+{
8084+ atomic64_add_unchecked(1, v);
8085 }
8086
8087-static __inline__ long atomic64_inc_return(atomic64_t *v)
8088+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8089 {
8090- long t;
8091-
8092- __asm__ __volatile__(
8093- PPC_ATOMIC_ENTRY_BARRIER
8094-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8095- addic %0,%0,1\n\
8096- stdcx. %0,0,%1 \n\
8097- bne- 1b"
8098- PPC_ATOMIC_EXIT_BARRIER
8099- : "=&r" (t)
8100- : "r" (&v->counter)
8101- : "cc", "xer", "memory");
8102-
8103- return t;
8104+ return atomic64_add_return_unchecked(1, v);
8105 }
8106
8107 /*
8108@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8109 */
8110 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8111
8112-static __inline__ void atomic64_dec(atomic64_t *v)
8113+/*
8114+ * atomic64_dec - decrement atomic variable
8115+ * @v: pointer of type atomic64_t
8116+ *
8117+ * Atomically decrements @v by 1
8118+ */
8119+#define atomic64_dec(v) atomic64_sub(1, (v))
8120+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8121+
8122+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8123 {
8124- long t;
8125-
8126- __asm__ __volatile__(
8127-"1: ldarx %0,0,%2 # atomic64_dec\n\
8128- addic %0,%0,-1\n\
8129- stdcx. %0,0,%2\n\
8130- bne- 1b"
8131- : "=&r" (t), "+m" (v->counter)
8132- : "r" (&v->counter)
8133- : "cc", "xer");
8134-}
8135-
8136-static __inline__ long atomic64_dec_return(atomic64_t *v)
8137-{
8138- long t;
8139-
8140- __asm__ __volatile__(
8141- PPC_ATOMIC_ENTRY_BARRIER
8142-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8143- addic %0,%0,-1\n\
8144- stdcx. %0,0,%1\n\
8145- bne- 1b"
8146- PPC_ATOMIC_EXIT_BARRIER
8147- : "=&r" (t)
8148- : "r" (&v->counter)
8149- : "cc", "xer", "memory");
8150-
8151- return t;
8152+ atomic64_sub_unchecked(1, v);
8153 }
8154
8155 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8156@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8157 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8158 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8159
8160+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8161+{
8162+ return cmpxchg(&(v->counter), old, new);
8163+}
8164+
8165+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8166+{
8167+ return xchg(&(v->counter), new);
8168+}
8169+
8170 /**
8171 * atomic64_add_unless - add unless the number is a given value
8172 * @v: pointer of type atomic64_t
8173@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8174
8175 __asm__ __volatile__ (
8176 PPC_ATOMIC_ENTRY_BARRIER
8177-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8178+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8179 cmpd 0,%0,%3 \n\
8180- beq- 2f \n\
8181- add %0,%2,%0 \n"
8182+ beq- 2f \n"
8183+
8184+#ifdef CONFIG_PAX_REFCOUNT
8185+" mcrxr cr0\n"
8186+" addo. %0,%2,%0\n"
8187+" bf 4*cr0+so, 4f\n"
8188+"3:.long " "0x00c00b00""\n"
8189+"4:\n"
8190+#else
8191+ "add %0,%2,%0 \n"
8192+#endif
8193+
8194 " stdcx. %0,0,%1 \n\
8195 bne- 1b \n"
8196 PPC_ATOMIC_EXIT_BARRIER
8197+"5:"
8198+
8199+#ifdef CONFIG_PAX_REFCOUNT
8200+ _ASM_EXTABLE(3b, 5b)
8201+#endif
8202+
8203 " subf %0,%2,%0 \n\
8204 2:"
8205 : "=&r" (t)
8206diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8207index a3bf5be..e03ba81 100644
8208--- a/arch/powerpc/include/asm/barrier.h
8209+++ b/arch/powerpc/include/asm/barrier.h
8210@@ -76,7 +76,7 @@
8211 do { \
8212 compiletime_assert_atomic_type(*p); \
8213 smp_lwsync(); \
8214- ACCESS_ONCE(*p) = (v); \
8215+ ACCESS_ONCE_RW(*p) = (v); \
8216 } while (0)
8217
8218 #define smp_load_acquire(p) \
8219diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8220index 34a05a1..a1f2c67 100644
8221--- a/arch/powerpc/include/asm/cache.h
8222+++ b/arch/powerpc/include/asm/cache.h
8223@@ -4,6 +4,7 @@
8224 #ifdef __KERNEL__
8225
8226 #include <asm/reg.h>
8227+#include <linux/const.h>
8228
8229 /* bytes per L1 cache line */
8230 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8231@@ -23,7 +24,7 @@
8232 #define L1_CACHE_SHIFT 7
8233 #endif
8234
8235-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8236+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8237
8238 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8239
8240diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8241index 57d289a..b36c98c 100644
8242--- a/arch/powerpc/include/asm/elf.h
8243+++ b/arch/powerpc/include/asm/elf.h
8244@@ -30,6 +30,18 @@
8245
8246 #define ELF_ET_DYN_BASE 0x20000000
8247
8248+#ifdef CONFIG_PAX_ASLR
8249+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8250+
8251+#ifdef __powerpc64__
8252+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8253+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8254+#else
8255+#define PAX_DELTA_MMAP_LEN 15
8256+#define PAX_DELTA_STACK_LEN 15
8257+#endif
8258+#endif
8259+
8260 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8261
8262 /*
8263@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8264 (0x7ff >> (PAGE_SHIFT - 12)) : \
8265 (0x3ffff >> (PAGE_SHIFT - 12)))
8266
8267-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8268-#define arch_randomize_brk arch_randomize_brk
8269-
8270-
8271 #ifdef CONFIG_SPU_BASE
8272 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8273 #define NT_SPU 1
8274diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8275index 8196e9c..d83a9f3 100644
8276--- a/arch/powerpc/include/asm/exec.h
8277+++ b/arch/powerpc/include/asm/exec.h
8278@@ -4,6 +4,6 @@
8279 #ifndef _ASM_POWERPC_EXEC_H
8280 #define _ASM_POWERPC_EXEC_H
8281
8282-extern unsigned long arch_align_stack(unsigned long sp);
8283+#define arch_align_stack(x) ((x) & ~0xfUL)
8284
8285 #endif /* _ASM_POWERPC_EXEC_H */
8286diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8287index 5acabbd..7ea14fa 100644
8288--- a/arch/powerpc/include/asm/kmap_types.h
8289+++ b/arch/powerpc/include/asm/kmap_types.h
8290@@ -10,7 +10,7 @@
8291 * 2 of the License, or (at your option) any later version.
8292 */
8293
8294-#define KM_TYPE_NR 16
8295+#define KM_TYPE_NR 17
8296
8297 #endif /* __KERNEL__ */
8298 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8299diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8300index b8da913..c02b593 100644
8301--- a/arch/powerpc/include/asm/local.h
8302+++ b/arch/powerpc/include/asm/local.h
8303@@ -9,21 +9,65 @@ typedef struct
8304 atomic_long_t a;
8305 } local_t;
8306
8307+typedef struct
8308+{
8309+ atomic_long_unchecked_t a;
8310+} local_unchecked_t;
8311+
8312 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8313
8314 #define local_read(l) atomic_long_read(&(l)->a)
8315+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8316 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8317+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8318
8319 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8320+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8321 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8322+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8323 #define local_inc(l) atomic_long_inc(&(l)->a)
8324+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8325 #define local_dec(l) atomic_long_dec(&(l)->a)
8326+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8327
8328 static __inline__ long local_add_return(long a, local_t *l)
8329 {
8330 long t;
8331
8332 __asm__ __volatile__(
8333+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8334+
8335+#ifdef CONFIG_PAX_REFCOUNT
8336+" mcrxr cr0\n"
8337+" addo. %0,%1,%0\n"
8338+" bf 4*cr0+so, 3f\n"
8339+"2:.long " "0x00c00b00""\n"
8340+#else
8341+" add %0,%1,%0\n"
8342+#endif
8343+
8344+"3:\n"
8345+ PPC405_ERR77(0,%2)
8346+ PPC_STLCX "%0,0,%2 \n\
8347+ bne- 1b"
8348+
8349+#ifdef CONFIG_PAX_REFCOUNT
8350+"\n4:\n"
8351+ _ASM_EXTABLE(2b, 4b)
8352+#endif
8353+
8354+ : "=&r" (t)
8355+ : "r" (a), "r" (&(l->a.counter))
8356+ : "cc", "memory");
8357+
8358+ return t;
8359+}
8360+
8361+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8362+{
8363+ long t;
8364+
8365+ __asm__ __volatile__(
8366 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8367 add %0,%1,%0\n"
8368 PPC405_ERR77(0,%2)
8369@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8370
8371 #define local_cmpxchg(l, o, n) \
8372 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8373+#define local_cmpxchg_unchecked(l, o, n) \
8374+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8375 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8376
8377 /**
8378diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8379index 8565c25..2865190 100644
8380--- a/arch/powerpc/include/asm/mman.h
8381+++ b/arch/powerpc/include/asm/mman.h
8382@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8383 }
8384 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8385
8386-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8387+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8388 {
8389 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8390 }
8391diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8392index 69c0598..2c56964 100644
8393--- a/arch/powerpc/include/asm/page.h
8394+++ b/arch/powerpc/include/asm/page.h
8395@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8396 * and needs to be executable. This means the whole heap ends
8397 * up being executable.
8398 */
8399-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8400- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8401+#define VM_DATA_DEFAULT_FLAGS32 \
8402+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8403+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8404
8405 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8406 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8407@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8408 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8409 #endif
8410
8411+#define ktla_ktva(addr) (addr)
8412+#define ktva_ktla(addr) (addr)
8413+
8414 #ifndef CONFIG_PPC_BOOK3S_64
8415 /*
8416 * Use the top bit of the higher-level page table entries to indicate whether
8417diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8418index d908a46..3753f71 100644
8419--- a/arch/powerpc/include/asm/page_64.h
8420+++ b/arch/powerpc/include/asm/page_64.h
8421@@ -172,15 +172,18 @@ do { \
8422 * stack by default, so in the absence of a PT_GNU_STACK program header
8423 * we turn execute permission off.
8424 */
8425-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8426- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8427+#define VM_STACK_DEFAULT_FLAGS32 \
8428+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8429+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8430
8431 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8432 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8433
8434+#ifndef CONFIG_PAX_PAGEEXEC
8435 #define VM_STACK_DEFAULT_FLAGS \
8436 (is_32bit_task() ? \
8437 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8438+#endif
8439
8440 #include <asm-generic/getorder.h>
8441
8442diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8443index 4b0be20..c15a27d 100644
8444--- a/arch/powerpc/include/asm/pgalloc-64.h
8445+++ b/arch/powerpc/include/asm/pgalloc-64.h
8446@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8447 #ifndef CONFIG_PPC_64K_PAGES
8448
8449 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8450+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8451
8452 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8453 {
8454@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8455 pud_set(pud, (unsigned long)pmd);
8456 }
8457
8458+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8459+{
8460+ pud_populate(mm, pud, pmd);
8461+}
8462+
8463 #define pmd_populate(mm, pmd, pte_page) \
8464 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8465 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8466@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8467 #endif
8468
8469 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8470+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8471
8472 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8473 pte_t *pte)
8474diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8475index 9835ac4..900430f 100644
8476--- a/arch/powerpc/include/asm/pgtable.h
8477+++ b/arch/powerpc/include/asm/pgtable.h
8478@@ -2,6 +2,7 @@
8479 #define _ASM_POWERPC_PGTABLE_H
8480 #ifdef __KERNEL__
8481
8482+#include <linux/const.h>
8483 #ifndef __ASSEMBLY__
8484 #include <linux/mmdebug.h>
8485 #include <linux/mmzone.h>
8486diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8487index 62cfb0c..50c6402 100644
8488--- a/arch/powerpc/include/asm/pte-hash32.h
8489+++ b/arch/powerpc/include/asm/pte-hash32.h
8490@@ -20,6 +20,7 @@
8491 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
8492 #define _PAGE_USER 0x004 /* usermode access allowed */
8493 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8494+#define _PAGE_EXEC _PAGE_GUARDED
8495 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8496 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8497 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8498diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8499index af56b5c..f86f3f6 100644
8500--- a/arch/powerpc/include/asm/reg.h
8501+++ b/arch/powerpc/include/asm/reg.h
8502@@ -253,6 +253,7 @@
8503 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8504 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8505 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8506+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8507 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8508 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8509 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8510diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8511index d607df5..08dc9ae 100644
8512--- a/arch/powerpc/include/asm/smp.h
8513+++ b/arch/powerpc/include/asm/smp.h
8514@@ -51,7 +51,7 @@ struct smp_ops_t {
8515 int (*cpu_disable)(void);
8516 void (*cpu_die)(unsigned int nr);
8517 int (*cpu_bootable)(unsigned int nr);
8518-};
8519+} __no_const;
8520
8521 extern void smp_send_debugger_break(void);
8522 extern void start_secondary_resume(void);
8523diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8524index 4dbe072..b803275 100644
8525--- a/arch/powerpc/include/asm/spinlock.h
8526+++ b/arch/powerpc/include/asm/spinlock.h
8527@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8528 __asm__ __volatile__(
8529 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8530 __DO_SIGN_EXTEND
8531-" addic. %0,%0,1\n\
8532- ble- 2f\n"
8533+
8534+#ifdef CONFIG_PAX_REFCOUNT
8535+" mcrxr cr0\n"
8536+" addico. %0,%0,1\n"
8537+" bf 4*cr0+so, 3f\n"
8538+"2:.long " "0x00c00b00""\n"
8539+#else
8540+" addic. %0,%0,1\n"
8541+#endif
8542+
8543+"3:\n"
8544+ "ble- 4f\n"
8545 PPC405_ERR77(0,%1)
8546 " stwcx. %0,0,%1\n\
8547 bne- 1b\n"
8548 PPC_ACQUIRE_BARRIER
8549-"2:" : "=&r" (tmp)
8550+"4:"
8551+
8552+#ifdef CONFIG_PAX_REFCOUNT
8553+ _ASM_EXTABLE(2b,4b)
8554+#endif
8555+
8556+ : "=&r" (tmp)
8557 : "r" (&rw->lock)
8558 : "cr0", "xer", "memory");
8559
8560@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8561 __asm__ __volatile__(
8562 "# read_unlock\n\t"
8563 PPC_RELEASE_BARRIER
8564-"1: lwarx %0,0,%1\n\
8565- addic %0,%0,-1\n"
8566+"1: lwarx %0,0,%1\n"
8567+
8568+#ifdef CONFIG_PAX_REFCOUNT
8569+" mcrxr cr0\n"
8570+" addico. %0,%0,-1\n"
8571+" bf 4*cr0+so, 3f\n"
8572+"2:.long " "0x00c00b00""\n"
8573+#else
8574+" addic. %0,%0,-1\n"
8575+#endif
8576+
8577+"3:\n"
8578 PPC405_ERR77(0,%1)
8579 " stwcx. %0,0,%1\n\
8580 bne- 1b"
8581+
8582+#ifdef CONFIG_PAX_REFCOUNT
8583+"\n4:\n"
8584+ _ASM_EXTABLE(2b, 4b)
8585+#endif
8586+
8587 : "=&r"(tmp)
8588 : "r"(&rw->lock)
8589 : "cr0", "xer", "memory");
8590diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8591index 7248979..80b75de 100644
8592--- a/arch/powerpc/include/asm/thread_info.h
8593+++ b/arch/powerpc/include/asm/thread_info.h
8594@@ -103,6 +103,8 @@ static inline struct thread_info *current_thread_info(void)
8595 #if defined(CONFIG_PPC64)
8596 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8597 #endif
8598+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
8599+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8600
8601 /* as above, but as bit values */
8602 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8603@@ -121,9 +123,10 @@ static inline struct thread_info *current_thread_info(void)
8604 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8605 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8606 #define _TIF_NOHZ (1<<TIF_NOHZ)
8607+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8608 #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8609 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8610- _TIF_NOHZ)
8611+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8612
8613 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8614 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8615diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8616index a0c071d..49cdc7f 100644
8617--- a/arch/powerpc/include/asm/uaccess.h
8618+++ b/arch/powerpc/include/asm/uaccess.h
8619@@ -58,6 +58,7 @@
8620
8621 #endif
8622
8623+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8624 #define access_ok(type, addr, size) \
8625 (__chk_user_ptr(addr), \
8626 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8627@@ -318,52 +319,6 @@ do { \
8628 extern unsigned long __copy_tofrom_user(void __user *to,
8629 const void __user *from, unsigned long size);
8630
8631-#ifndef __powerpc64__
8632-
8633-static inline unsigned long copy_from_user(void *to,
8634- const void __user *from, unsigned long n)
8635-{
8636- unsigned long over;
8637-
8638- if (access_ok(VERIFY_READ, from, n))
8639- return __copy_tofrom_user((__force void __user *)to, from, n);
8640- if ((unsigned long)from < TASK_SIZE) {
8641- over = (unsigned long)from + n - TASK_SIZE;
8642- return __copy_tofrom_user((__force void __user *)to, from,
8643- n - over) + over;
8644- }
8645- return n;
8646-}
8647-
8648-static inline unsigned long copy_to_user(void __user *to,
8649- const void *from, unsigned long n)
8650-{
8651- unsigned long over;
8652-
8653- if (access_ok(VERIFY_WRITE, to, n))
8654- return __copy_tofrom_user(to, (__force void __user *)from, n);
8655- if ((unsigned long)to < TASK_SIZE) {
8656- over = (unsigned long)to + n - TASK_SIZE;
8657- return __copy_tofrom_user(to, (__force void __user *)from,
8658- n - over) + over;
8659- }
8660- return n;
8661-}
8662-
8663-#else /* __powerpc64__ */
8664-
8665-#define __copy_in_user(to, from, size) \
8666- __copy_tofrom_user((to), (from), (size))
8667-
8668-extern unsigned long copy_from_user(void *to, const void __user *from,
8669- unsigned long n);
8670-extern unsigned long copy_to_user(void __user *to, const void *from,
8671- unsigned long n);
8672-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8673- unsigned long n);
8674-
8675-#endif /* __powerpc64__ */
8676-
8677 static inline unsigned long __copy_from_user_inatomic(void *to,
8678 const void __user *from, unsigned long n)
8679 {
8680@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8681 if (ret == 0)
8682 return 0;
8683 }
8684+
8685+ if (!__builtin_constant_p(n))
8686+ check_object_size(to, n, false);
8687+
8688 return __copy_tofrom_user((__force void __user *)to, from, n);
8689 }
8690
8691@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8692 if (ret == 0)
8693 return 0;
8694 }
8695+
8696+ if (!__builtin_constant_p(n))
8697+ check_object_size(from, n, true);
8698+
8699 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8700 }
8701
8702@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8703 return __copy_to_user_inatomic(to, from, size);
8704 }
8705
8706+#ifndef __powerpc64__
8707+
8708+static inline unsigned long __must_check copy_from_user(void *to,
8709+ const void __user *from, unsigned long n)
8710+{
8711+ unsigned long over;
8712+
8713+ if ((long)n < 0)
8714+ return n;
8715+
8716+ if (access_ok(VERIFY_READ, from, n)) {
8717+ if (!__builtin_constant_p(n))
8718+ check_object_size(to, n, false);
8719+ return __copy_tofrom_user((__force void __user *)to, from, n);
8720+ }
8721+ if ((unsigned long)from < TASK_SIZE) {
8722+ over = (unsigned long)from + n - TASK_SIZE;
8723+ if (!__builtin_constant_p(n - over))
8724+ check_object_size(to, n - over, false);
8725+ return __copy_tofrom_user((__force void __user *)to, from,
8726+ n - over) + over;
8727+ }
8728+ return n;
8729+}
8730+
8731+static inline unsigned long __must_check copy_to_user(void __user *to,
8732+ const void *from, unsigned long n)
8733+{
8734+ unsigned long over;
8735+
8736+ if ((long)n < 0)
8737+ return n;
8738+
8739+ if (access_ok(VERIFY_WRITE, to, n)) {
8740+ if (!__builtin_constant_p(n))
8741+ check_object_size(from, n, true);
8742+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8743+ }
8744+ if ((unsigned long)to < TASK_SIZE) {
8745+ over = (unsigned long)to + n - TASK_SIZE;
8746+ if (!__builtin_constant_p(n))
8747+ check_object_size(from, n - over, true);
8748+ return __copy_tofrom_user(to, (__force void __user *)from,
8749+ n - over) + over;
8750+ }
8751+ return n;
8752+}
8753+
8754+#else /* __powerpc64__ */
8755+
8756+#define __copy_in_user(to, from, size) \
8757+ __copy_tofrom_user((to), (from), (size))
8758+
8759+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8760+{
8761+ if ((long)n < 0 || n > INT_MAX)
8762+ return n;
8763+
8764+ if (!__builtin_constant_p(n))
8765+ check_object_size(to, n, false);
8766+
8767+ if (likely(access_ok(VERIFY_READ, from, n)))
8768+ n = __copy_from_user(to, from, n);
8769+ else
8770+ memset(to, 0, n);
8771+ return n;
8772+}
8773+
8774+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8775+{
8776+ if ((long)n < 0 || n > INT_MAX)
8777+ return n;
8778+
8779+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8780+ if (!__builtin_constant_p(n))
8781+ check_object_size(from, n, true);
8782+ n = __copy_to_user(to, from, n);
8783+ }
8784+ return n;
8785+}
8786+
8787+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8788+ unsigned long n);
8789+
8790+#endif /* __powerpc64__ */
8791+
8792 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8793
8794 static inline unsigned long clear_user(void __user *addr, unsigned long size)
8795diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8796index 502cf69..53936a1 100644
8797--- a/arch/powerpc/kernel/Makefile
8798+++ b/arch/powerpc/kernel/Makefile
8799@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8800 CFLAGS_btext.o += -fPIC
8801 endif
8802
8803+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8804+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8805+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8806+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8807+
8808 ifdef CONFIG_FUNCTION_TRACER
8809 # Do not trace early boot code
8810 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8811@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8812 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8813 endif
8814
8815+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8816+
8817 obj-y := cputable.o ptrace.o syscalls.o \
8818 irq.o align.o signal_32.o pmc.o vdso.o \
8819 process.o systbl.o idle.o \
8820diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8821index 3e68d1c..72a5ee6 100644
8822--- a/arch/powerpc/kernel/exceptions-64e.S
8823+++ b/arch/powerpc/kernel/exceptions-64e.S
8824@@ -1010,6 +1010,7 @@ storage_fault_common:
8825 std r14,_DAR(r1)
8826 std r15,_DSISR(r1)
8827 addi r3,r1,STACK_FRAME_OVERHEAD
8828+ bl save_nvgprs
8829 mr r4,r14
8830 mr r5,r15
8831 ld r14,PACA_EXGEN+EX_R14(r13)
8832@@ -1018,8 +1019,7 @@ storage_fault_common:
8833 cmpdi r3,0
8834 bne- 1f
8835 b ret_from_except_lite
8836-1: bl save_nvgprs
8837- mr r5,r3
8838+1: mr r5,r3
8839 addi r3,r1,STACK_FRAME_OVERHEAD
8840 ld r4,_DAR(r1)
8841 bl bad_page_fault
8842diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8843index 9519e6b..13f6c38 100644
8844--- a/arch/powerpc/kernel/exceptions-64s.S
8845+++ b/arch/powerpc/kernel/exceptions-64s.S
8846@@ -1599,10 +1599,10 @@ handle_page_fault:
8847 11: ld r4,_DAR(r1)
8848 ld r5,_DSISR(r1)
8849 addi r3,r1,STACK_FRAME_OVERHEAD
8850+ bl save_nvgprs
8851 bl do_page_fault
8852 cmpdi r3,0
8853 beq+ 12f
8854- bl save_nvgprs
8855 mr r5,r3
8856 addi r3,r1,STACK_FRAME_OVERHEAD
8857 lwz r4,_DAR(r1)
8858diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8859index 4509603..cdb491f 100644
8860--- a/arch/powerpc/kernel/irq.c
8861+++ b/arch/powerpc/kernel/irq.c
8862@@ -460,6 +460,8 @@ void migrate_irqs(void)
8863 }
8864 #endif
8865
8866+extern void gr_handle_kernel_exploit(void);
8867+
8868 static inline void check_stack_overflow(void)
8869 {
8870 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8871@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8872 pr_err("do_IRQ: stack overflow: %ld\n",
8873 sp - sizeof(struct thread_info));
8874 dump_stack();
8875+ gr_handle_kernel_exploit();
8876 }
8877 #endif
8878 }
8879diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8880index c94d2e0..992a9ce 100644
8881--- a/arch/powerpc/kernel/module_32.c
8882+++ b/arch/powerpc/kernel/module_32.c
8883@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8884 me->arch.core_plt_section = i;
8885 }
8886 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8887- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8888+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8889 return -ENOEXEC;
8890 }
8891
8892@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8893
8894 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8895 /* Init, or core PLT? */
8896- if (location >= mod->module_core
8897- && location < mod->module_core + mod->core_size)
8898+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8899+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8900 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8901- else
8902+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8903+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8904 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8905+ else {
8906+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8907+ return ~0UL;
8908+ }
8909
8910 /* Find this entry, or if that fails, the next avail. entry */
8911 while (entry->jump[0]) {
8912@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8913 }
8914 #ifdef CONFIG_DYNAMIC_FTRACE
8915 module->arch.tramp =
8916- do_plt_call(module->module_core,
8917+ do_plt_call(module->module_core_rx,
8918 (unsigned long)ftrace_caller,
8919 sechdrs, module);
8920 #endif
8921diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8922index b4cc7be..1fe8bb3 100644
8923--- a/arch/powerpc/kernel/process.c
8924+++ b/arch/powerpc/kernel/process.c
8925@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8926 * Lookup NIP late so we have the best change of getting the
8927 * above info out without failing
8928 */
8929- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8930- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8931+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8932+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8933 #endif
8934 show_stack(current, (unsigned long *) regs->gpr[1]);
8935 if (!user_mode(regs))
8936@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8937 newsp = stack[0];
8938 ip = stack[STACK_FRAME_LR_SAVE];
8939 if (!firstframe || ip != lr) {
8940- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8941+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8942 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8943 if ((ip == rth) && curr_frame >= 0) {
8944- printk(" (%pS)",
8945+ printk(" (%pA)",
8946 (void *)current->ret_stack[curr_frame].ret);
8947 curr_frame--;
8948 }
8949@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8950 struct pt_regs *regs = (struct pt_regs *)
8951 (sp + STACK_FRAME_OVERHEAD);
8952 lr = regs->link;
8953- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8954+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8955 regs->trap, (void *)regs->nip, (void *)lr);
8956 firstframe = 1;
8957 }
8958@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8959 mtspr(SPRN_CTRLT, ctrl);
8960 }
8961 #endif /* CONFIG_PPC64 */
8962-
8963-unsigned long arch_align_stack(unsigned long sp)
8964-{
8965- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8966- sp -= get_random_int() & ~PAGE_MASK;
8967- return sp & ~0xf;
8968-}
8969-
8970-static inline unsigned long brk_rnd(void)
8971-{
8972- unsigned long rnd = 0;
8973-
8974- /* 8MB for 32bit, 1GB for 64bit */
8975- if (is_32bit_task())
8976- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8977- else
8978- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8979-
8980- return rnd << PAGE_SHIFT;
8981-}
8982-
8983-unsigned long arch_randomize_brk(struct mm_struct *mm)
8984-{
8985- unsigned long base = mm->brk;
8986- unsigned long ret;
8987-
8988-#ifdef CONFIG_PPC_STD_MMU_64
8989- /*
8990- * If we are using 1TB segments and we are allowed to randomise
8991- * the heap, we can put it above 1TB so it is backed by a 1TB
8992- * segment. Otherwise the heap will be in the bottom 1TB
8993- * which always uses 256MB segments and this may result in a
8994- * performance penalty.
8995- */
8996- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8997- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8998-#endif
8999-
9000- ret = PAGE_ALIGN(base + brk_rnd());
9001-
9002- if (ret < mm->brk)
9003- return mm->brk;
9004-
9005- return ret;
9006-}
9007-
9008diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9009index f21897b..28c0428 100644
9010--- a/arch/powerpc/kernel/ptrace.c
9011+++ b/arch/powerpc/kernel/ptrace.c
9012@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9013 return ret;
9014 }
9015
9016+#ifdef CONFIG_GRKERNSEC_SETXID
9017+extern void gr_delayed_cred_worker(void);
9018+#endif
9019+
9020 /*
9021 * We must return the syscall number to actually look up in the table.
9022 * This can be -1L to skip running any syscall at all.
9023@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9024
9025 secure_computing_strict(regs->gpr[0]);
9026
9027+#ifdef CONFIG_GRKERNSEC_SETXID
9028+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9029+ gr_delayed_cred_worker();
9030+#endif
9031+
9032 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9033 tracehook_report_syscall_entry(regs))
9034 /*
9035@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9036 {
9037 int step;
9038
9039+#ifdef CONFIG_GRKERNSEC_SETXID
9040+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9041+ gr_delayed_cred_worker();
9042+#endif
9043+
9044 audit_syscall_exit(regs);
9045
9046 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9047diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9048index d3a831a..3a33123 100644
9049--- a/arch/powerpc/kernel/signal_32.c
9050+++ b/arch/powerpc/kernel/signal_32.c
9051@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9052 /* Save user registers on the stack */
9053 frame = &rt_sf->uc.uc_mcontext;
9054 addr = frame;
9055- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9056+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9057 sigret = 0;
9058 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9059 } else {
9060diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9061index c7c24d2..1bf7039 100644
9062--- a/arch/powerpc/kernel/signal_64.c
9063+++ b/arch/powerpc/kernel/signal_64.c
9064@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9065 current->thread.fp_state.fpscr = 0;
9066
9067 /* Set up to return from userspace. */
9068- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9069+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9070 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9071 } else {
9072 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9073diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9074index 19e4744..28a8d7b 100644
9075--- a/arch/powerpc/kernel/traps.c
9076+++ b/arch/powerpc/kernel/traps.c
9077@@ -36,6 +36,7 @@
9078 #include <linux/debugfs.h>
9079 #include <linux/ratelimit.h>
9080 #include <linux/context_tracking.h>
9081+#include <linux/uaccess.h>
9082
9083 #include <asm/emulated_ops.h>
9084 #include <asm/pgtable.h>
9085@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9086 return flags;
9087 }
9088
9089+extern void gr_handle_kernel_exploit(void);
9090+
9091 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9092 int signr)
9093 {
9094@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9095 panic("Fatal exception in interrupt");
9096 if (panic_on_oops)
9097 panic("Fatal exception");
9098+
9099+ gr_handle_kernel_exploit();
9100+
9101 do_exit(signr);
9102 }
9103
9104@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9105 enum ctx_state prev_state = exception_enter();
9106 unsigned int reason = get_reason(regs);
9107
9108+#ifdef CONFIG_PAX_REFCOUNT
9109+ unsigned int bkpt;
9110+ const struct exception_table_entry *entry;
9111+
9112+ if (reason & REASON_ILLEGAL) {
9113+ /* Check if PaX bad instruction */
9114+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9115+ current->thread.trap_nr = 0;
9116+ pax_report_refcount_overflow(regs);
9117+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9118+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9119+ regs->nip = entry->fixup;
9120+ return;
9121+ }
9122+ /* fixup_exception() could not handle */
9123+ goto bail;
9124+ }
9125+ }
9126+#endif
9127+
9128 /* We can now get here via a FP Unavailable exception if the core
9129 * has no FPU, in that case the reason flags will be 0 */
9130
9131diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9132index 305eb0d..accc5b40 100644
9133--- a/arch/powerpc/kernel/vdso.c
9134+++ b/arch/powerpc/kernel/vdso.c
9135@@ -34,6 +34,7 @@
9136 #include <asm/vdso.h>
9137 #include <asm/vdso_datapage.h>
9138 #include <asm/setup.h>
9139+#include <asm/mman.h>
9140
9141 #undef DEBUG
9142
9143@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9144 vdso_base = VDSO32_MBASE;
9145 #endif
9146
9147- current->mm->context.vdso_base = 0;
9148+ current->mm->context.vdso_base = ~0UL;
9149
9150 /* vDSO has a problem and was disabled, just don't "enable" it for the
9151 * process
9152@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9153 vdso_base = get_unmapped_area(NULL, vdso_base,
9154 (vdso_pages << PAGE_SHIFT) +
9155 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9156- 0, 0);
9157+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9158 if (IS_ERR_VALUE(vdso_base)) {
9159 rc = vdso_base;
9160 goto fail_mmapsem;
9161diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9162index 27c0fac..6ec4a32 100644
9163--- a/arch/powerpc/kvm/powerpc.c
9164+++ b/arch/powerpc/kvm/powerpc.c
9165@@ -1402,7 +1402,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9166 }
9167 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9168
9169-int kvm_arch_init(void *opaque)
9170+int kvm_arch_init(const void *opaque)
9171 {
9172 return 0;
9173 }
9174diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9175index 5eea6f3..5d10396 100644
9176--- a/arch/powerpc/lib/usercopy_64.c
9177+++ b/arch/powerpc/lib/usercopy_64.c
9178@@ -9,22 +9,6 @@
9179 #include <linux/module.h>
9180 #include <asm/uaccess.h>
9181
9182-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9183-{
9184- if (likely(access_ok(VERIFY_READ, from, n)))
9185- n = __copy_from_user(to, from, n);
9186- else
9187- memset(to, 0, n);
9188- return n;
9189-}
9190-
9191-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9192-{
9193- if (likely(access_ok(VERIFY_WRITE, to, n)))
9194- n = __copy_to_user(to, from, n);
9195- return n;
9196-}
9197-
9198 unsigned long copy_in_user(void __user *to, const void __user *from,
9199 unsigned long n)
9200 {
9201@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9202 return n;
9203 }
9204
9205-EXPORT_SYMBOL(copy_from_user);
9206-EXPORT_SYMBOL(copy_to_user);
9207 EXPORT_SYMBOL(copy_in_user);
9208
9209diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9210index b396868..3eb6b9f 100644
9211--- a/arch/powerpc/mm/fault.c
9212+++ b/arch/powerpc/mm/fault.c
9213@@ -33,6 +33,10 @@
9214 #include <linux/ratelimit.h>
9215 #include <linux/context_tracking.h>
9216 #include <linux/hugetlb.h>
9217+#include <linux/slab.h>
9218+#include <linux/pagemap.h>
9219+#include <linux/compiler.h>
9220+#include <linux/unistd.h>
9221
9222 #include <asm/firmware.h>
9223 #include <asm/page.h>
9224@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9225 }
9226 #endif
9227
9228+#ifdef CONFIG_PAX_PAGEEXEC
9229+/*
9230+ * PaX: decide what to do with offenders (regs->nip = fault address)
9231+ *
9232+ * returns 1 when task should be killed
9233+ */
9234+static int pax_handle_fetch_fault(struct pt_regs *regs)
9235+{
9236+ return 1;
9237+}
9238+
9239+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9240+{
9241+ unsigned long i;
9242+
9243+ printk(KERN_ERR "PAX: bytes at PC: ");
9244+ for (i = 0; i < 5; i++) {
9245+ unsigned int c;
9246+ if (get_user(c, (unsigned int __user *)pc+i))
9247+ printk(KERN_CONT "???????? ");
9248+ else
9249+ printk(KERN_CONT "%08x ", c);
9250+ }
9251+ printk("\n");
9252+}
9253+#endif
9254+
9255 /*
9256 * Check whether the instruction at regs->nip is a store using
9257 * an update addressing form which will update r1.
9258@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9259 * indicate errors in DSISR but can validly be set in SRR1.
9260 */
9261 if (trap == 0x400)
9262- error_code &= 0x48200000;
9263+ error_code &= 0x58200000;
9264 else
9265 is_write = error_code & DSISR_ISSTORE;
9266 #else
9267@@ -383,12 +414,16 @@ good_area:
9268 * "undefined". Of those that can be set, this is the only
9269 * one which seems bad.
9270 */
9271- if (error_code & 0x10000000)
9272+ if (error_code & DSISR_GUARDED)
9273 /* Guarded storage error. */
9274 goto bad_area;
9275 #endif /* CONFIG_8xx */
9276
9277 if (is_exec) {
9278+#ifdef CONFIG_PPC_STD_MMU
9279+ if (error_code & DSISR_GUARDED)
9280+ goto bad_area;
9281+#endif
9282 /*
9283 * Allow execution from readable areas if the MMU does not
9284 * provide separate controls over reading and executing.
9285@@ -483,6 +518,23 @@ bad_area:
9286 bad_area_nosemaphore:
9287 /* User mode accesses cause a SIGSEGV */
9288 if (user_mode(regs)) {
9289+
9290+#ifdef CONFIG_PAX_PAGEEXEC
9291+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9292+#ifdef CONFIG_PPC_STD_MMU
9293+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9294+#else
9295+ if (is_exec && regs->nip == address) {
9296+#endif
9297+ switch (pax_handle_fetch_fault(regs)) {
9298+ }
9299+
9300+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9301+ do_group_exit(SIGKILL);
9302+ }
9303+ }
9304+#endif
9305+
9306 _exception(SIGSEGV, regs, code, address);
9307 goto bail;
9308 }
9309diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9310index cb8bdbe..cde4bc7 100644
9311--- a/arch/powerpc/mm/mmap.c
9312+++ b/arch/powerpc/mm/mmap.c
9313@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9314 return sysctl_legacy_va_layout;
9315 }
9316
9317-static unsigned long mmap_rnd(void)
9318+static unsigned long mmap_rnd(struct mm_struct *mm)
9319 {
9320 unsigned long rnd = 0;
9321
9322+#ifdef CONFIG_PAX_RANDMMAP
9323+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9324+#endif
9325+
9326 if (current->flags & PF_RANDOMIZE) {
9327 /* 8MB for 32bit, 1GB for 64bit */
9328 if (is_32bit_task())
9329@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9330 return rnd << PAGE_SHIFT;
9331 }
9332
9333-static inline unsigned long mmap_base(void)
9334+static inline unsigned long mmap_base(struct mm_struct *mm)
9335 {
9336 unsigned long gap = rlimit(RLIMIT_STACK);
9337
9338@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9339 else if (gap > MAX_GAP)
9340 gap = MAX_GAP;
9341
9342- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9343+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9344 }
9345
9346 /*
9347@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9348 */
9349 if (mmap_is_legacy()) {
9350 mm->mmap_base = TASK_UNMAPPED_BASE;
9351+
9352+#ifdef CONFIG_PAX_RANDMMAP
9353+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9354+ mm->mmap_base += mm->delta_mmap;
9355+#endif
9356+
9357 mm->get_unmapped_area = arch_get_unmapped_area;
9358 } else {
9359- mm->mmap_base = mmap_base();
9360+ mm->mmap_base = mmap_base(mm);
9361+
9362+#ifdef CONFIG_PAX_RANDMMAP
9363+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9364+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9365+#endif
9366+
9367 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9368 }
9369 }
9370diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9371index 0f432a7..abfe841 100644
9372--- a/arch/powerpc/mm/slice.c
9373+++ b/arch/powerpc/mm/slice.c
9374@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9375 if ((mm->task_size - len) < addr)
9376 return 0;
9377 vma = find_vma(mm, addr);
9378- return (!vma || (addr + len) <= vma->vm_start);
9379+ return check_heap_stack_gap(vma, addr, len, 0);
9380 }
9381
9382 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9383@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9384 info.align_offset = 0;
9385
9386 addr = TASK_UNMAPPED_BASE;
9387+
9388+#ifdef CONFIG_PAX_RANDMMAP
9389+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9390+ addr += mm->delta_mmap;
9391+#endif
9392+
9393 while (addr < TASK_SIZE) {
9394 info.low_limit = addr;
9395 if (!slice_scan_available(addr, available, 1, &addr))
9396@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9397 if (fixed && addr > (mm->task_size - len))
9398 return -ENOMEM;
9399
9400+#ifdef CONFIG_PAX_RANDMMAP
9401+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9402+ addr = 0;
9403+#endif
9404+
9405 /* If hint, make sure it matches our alignment restrictions */
9406 if (!fixed && addr) {
9407 addr = _ALIGN_UP(addr, 1ul << pshift);
9408diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9409index d966bbe..372124a 100644
9410--- a/arch/powerpc/platforms/cell/spufs/file.c
9411+++ b/arch/powerpc/platforms/cell/spufs/file.c
9412@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9413 return VM_FAULT_NOPAGE;
9414 }
9415
9416-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9417+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9418 unsigned long address,
9419- void *buf, int len, int write)
9420+ void *buf, size_t len, int write)
9421 {
9422 struct spu_context *ctx = vma->vm_file->private_data;
9423 unsigned long offset = address - vma->vm_start;
9424diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9425index fa934fe..c296056 100644
9426--- a/arch/s390/include/asm/atomic.h
9427+++ b/arch/s390/include/asm/atomic.h
9428@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9429 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9430 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9431
9432+#define atomic64_read_unchecked(v) atomic64_read(v)
9433+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9434+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9435+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9436+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9437+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9438+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9439+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9440+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9441+
9442 #endif /* __ARCH_S390_ATOMIC__ */
9443diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9444index 8d72471..5322500 100644
9445--- a/arch/s390/include/asm/barrier.h
9446+++ b/arch/s390/include/asm/barrier.h
9447@@ -42,7 +42,7 @@
9448 do { \
9449 compiletime_assert_atomic_type(*p); \
9450 barrier(); \
9451- ACCESS_ONCE(*p) = (v); \
9452+ ACCESS_ONCE_RW(*p) = (v); \
9453 } while (0)
9454
9455 #define smp_load_acquire(p) \
9456diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9457index 4d7ccac..d03d0ad 100644
9458--- a/arch/s390/include/asm/cache.h
9459+++ b/arch/s390/include/asm/cache.h
9460@@ -9,8 +9,10 @@
9461 #ifndef __ARCH_S390_CACHE_H
9462 #define __ARCH_S390_CACHE_H
9463
9464-#define L1_CACHE_BYTES 256
9465+#include <linux/const.h>
9466+
9467 #define L1_CACHE_SHIFT 8
9468+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9469 #define NET_SKB_PAD 32
9470
9471 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9472diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9473index c9c875d..b4b0e4c 100644
9474--- a/arch/s390/include/asm/elf.h
9475+++ b/arch/s390/include/asm/elf.h
9476@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9477 the loader. We need to make sure that it is out of the way of the program
9478 that it will "exec", and that there is sufficient room for the brk. */
9479
9480-extern unsigned long randomize_et_dyn(void);
9481-#define ELF_ET_DYN_BASE randomize_et_dyn()
9482+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9483+
9484+#ifdef CONFIG_PAX_ASLR
9485+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9486+
9487+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9488+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9489+#endif
9490
9491 /* This yields a mask that user programs can use to figure out what
9492 instruction set this CPU supports. */
9493@@ -225,9 +231,6 @@ struct linux_binprm;
9494 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9495 int arch_setup_additional_pages(struct linux_binprm *, int);
9496
9497-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9498-#define arch_randomize_brk arch_randomize_brk
9499-
9500 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9501
9502 #endif
9503diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9504index c4a93d6..4d2a9b4 100644
9505--- a/arch/s390/include/asm/exec.h
9506+++ b/arch/s390/include/asm/exec.h
9507@@ -7,6 +7,6 @@
9508 #ifndef __ASM_EXEC_H
9509 #define __ASM_EXEC_H
9510
9511-extern unsigned long arch_align_stack(unsigned long sp);
9512+#define arch_align_stack(x) ((x) & ~0xfUL)
9513
9514 #endif /* __ASM_EXEC_H */
9515diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9516index cd4c68e..6764641 100644
9517--- a/arch/s390/include/asm/uaccess.h
9518+++ b/arch/s390/include/asm/uaccess.h
9519@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9520 __range_ok((unsigned long)(addr), (size)); \
9521 })
9522
9523+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9524 #define access_ok(type, addr, size) __access_ok(addr, size)
9525
9526 /*
9527@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9528 copy_to_user(void __user *to, const void *from, unsigned long n)
9529 {
9530 might_fault();
9531+
9532+ if ((long)n < 0)
9533+ return n;
9534+
9535 return __copy_to_user(to, from, n);
9536 }
9537
9538@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9539 static inline unsigned long __must_check
9540 copy_from_user(void *to, const void __user *from, unsigned long n)
9541 {
9542- unsigned int sz = __compiletime_object_size(to);
9543+ size_t sz = __compiletime_object_size(to);
9544
9545 might_fault();
9546- if (unlikely(sz != -1 && sz < n)) {
9547+
9548+ if ((long)n < 0)
9549+ return n;
9550+
9551+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9552 copy_from_user_overflow();
9553 return n;
9554 }
9555diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9556index 2ca9586..55682a9 100644
9557--- a/arch/s390/kernel/module.c
9558+++ b/arch/s390/kernel/module.c
9559@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9560
9561 /* Increase core size by size of got & plt and set start
9562 offsets for got and plt. */
9563- me->core_size = ALIGN(me->core_size, 4);
9564- me->arch.got_offset = me->core_size;
9565- me->core_size += me->arch.got_size;
9566- me->arch.plt_offset = me->core_size;
9567- me->core_size += me->arch.plt_size;
9568+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9569+ me->arch.got_offset = me->core_size_rw;
9570+ me->core_size_rw += me->arch.got_size;
9571+ me->arch.plt_offset = me->core_size_rx;
9572+ me->core_size_rx += me->arch.plt_size;
9573 return 0;
9574 }
9575
9576@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9577 if (info->got_initialized == 0) {
9578 Elf_Addr *gotent;
9579
9580- gotent = me->module_core + me->arch.got_offset +
9581+ gotent = me->module_core_rw + me->arch.got_offset +
9582 info->got_offset;
9583 *gotent = val;
9584 info->got_initialized = 1;
9585@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9586 rc = apply_rela_bits(loc, val, 0, 64, 0);
9587 else if (r_type == R_390_GOTENT ||
9588 r_type == R_390_GOTPLTENT) {
9589- val += (Elf_Addr) me->module_core - loc;
9590+ val += (Elf_Addr) me->module_core_rw - loc;
9591 rc = apply_rela_bits(loc, val, 1, 32, 1);
9592 }
9593 break;
9594@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9595 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9596 if (info->plt_initialized == 0) {
9597 unsigned int *ip;
9598- ip = me->module_core + me->arch.plt_offset +
9599+ ip = me->module_core_rx + me->arch.plt_offset +
9600 info->plt_offset;
9601 #ifndef CONFIG_64BIT
9602 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9603@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9604 val - loc + 0xffffUL < 0x1ffffeUL) ||
9605 (r_type == R_390_PLT32DBL &&
9606 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9607- val = (Elf_Addr) me->module_core +
9608+ val = (Elf_Addr) me->module_core_rx +
9609 me->arch.plt_offset +
9610 info->plt_offset;
9611 val += rela->r_addend - loc;
9612@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9613 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9614 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9615 val = val + rela->r_addend -
9616- ((Elf_Addr) me->module_core + me->arch.got_offset);
9617+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9618 if (r_type == R_390_GOTOFF16)
9619 rc = apply_rela_bits(loc, val, 0, 16, 0);
9620 else if (r_type == R_390_GOTOFF32)
9621@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9622 break;
9623 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9624 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9625- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9626+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9627 rela->r_addend - loc;
9628 if (r_type == R_390_GOTPC)
9629 rc = apply_rela_bits(loc, val, 1, 32, 0);
9630diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9631index 13fc097..84d375f 100644
9632--- a/arch/s390/kernel/process.c
9633+++ b/arch/s390/kernel/process.c
9634@@ -227,27 +227,3 @@ unsigned long get_wchan(struct task_struct *p)
9635 }
9636 return 0;
9637 }
9638-
9639-unsigned long arch_align_stack(unsigned long sp)
9640-{
9641- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9642- sp -= get_random_int() & ~PAGE_MASK;
9643- return sp & ~0xf;
9644-}
9645-
9646-static inline unsigned long brk_rnd(void)
9647-{
9648- /* 8MB for 32bit, 1GB for 64bit */
9649- if (is_32bit_task())
9650- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9651- else
9652- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9653-}
9654-
9655-unsigned long arch_randomize_brk(struct mm_struct *mm)
9656-{
9657- unsigned long ret;
9658-
9659- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9660- return (ret > mm->brk) ? ret : mm->brk;
9661-}
9662diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9663index 179a2c2..371e85c 100644
9664--- a/arch/s390/mm/mmap.c
9665+++ b/arch/s390/mm/mmap.c
9666@@ -204,9 +204,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9667 */
9668 if (mmap_is_legacy()) {
9669 mm->mmap_base = mmap_base_legacy();
9670+
9671+#ifdef CONFIG_PAX_RANDMMAP
9672+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9673+ mm->mmap_base += mm->delta_mmap;
9674+#endif
9675+
9676 mm->get_unmapped_area = arch_get_unmapped_area;
9677 } else {
9678 mm->mmap_base = mmap_base();
9679+
9680+#ifdef CONFIG_PAX_RANDMMAP
9681+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9682+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9683+#endif
9684+
9685 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9686 }
9687 }
9688@@ -279,9 +291,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9689 */
9690 if (mmap_is_legacy()) {
9691 mm->mmap_base = mmap_base_legacy();
9692+
9693+#ifdef CONFIG_PAX_RANDMMAP
9694+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9695+ mm->mmap_base += mm->delta_mmap;
9696+#endif
9697+
9698 mm->get_unmapped_area = s390_get_unmapped_area;
9699 } else {
9700 mm->mmap_base = mmap_base();
9701+
9702+#ifdef CONFIG_PAX_RANDMMAP
9703+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9704+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9705+#endif
9706+
9707 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9708 }
9709 }
9710diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9711index ae3d59f..f65f075 100644
9712--- a/arch/score/include/asm/cache.h
9713+++ b/arch/score/include/asm/cache.h
9714@@ -1,7 +1,9 @@
9715 #ifndef _ASM_SCORE_CACHE_H
9716 #define _ASM_SCORE_CACHE_H
9717
9718+#include <linux/const.h>
9719+
9720 #define L1_CACHE_SHIFT 4
9721-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9722+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9723
9724 #endif /* _ASM_SCORE_CACHE_H */
9725diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9726index f9f3cd5..58ff438 100644
9727--- a/arch/score/include/asm/exec.h
9728+++ b/arch/score/include/asm/exec.h
9729@@ -1,6 +1,6 @@
9730 #ifndef _ASM_SCORE_EXEC_H
9731 #define _ASM_SCORE_EXEC_H
9732
9733-extern unsigned long arch_align_stack(unsigned long sp);
9734+#define arch_align_stack(x) (x)
9735
9736 #endif /* _ASM_SCORE_EXEC_H */
9737diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9738index a1519ad3..e8ac1ff 100644
9739--- a/arch/score/kernel/process.c
9740+++ b/arch/score/kernel/process.c
9741@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9742
9743 return task_pt_regs(task)->cp0_epc;
9744 }
9745-
9746-unsigned long arch_align_stack(unsigned long sp)
9747-{
9748- return sp;
9749-}
9750diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9751index ef9e555..331bd29 100644
9752--- a/arch/sh/include/asm/cache.h
9753+++ b/arch/sh/include/asm/cache.h
9754@@ -9,10 +9,11 @@
9755 #define __ASM_SH_CACHE_H
9756 #ifdef __KERNEL__
9757
9758+#include <linux/const.h>
9759 #include <linux/init.h>
9760 #include <cpu/cache.h>
9761
9762-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9763+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9764
9765 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9766
9767diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9768index 6777177..cb5e44f 100644
9769--- a/arch/sh/mm/mmap.c
9770+++ b/arch/sh/mm/mmap.c
9771@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9772 struct mm_struct *mm = current->mm;
9773 struct vm_area_struct *vma;
9774 int do_colour_align;
9775+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9776 struct vm_unmapped_area_info info;
9777
9778 if (flags & MAP_FIXED) {
9779@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9780 if (filp || (flags & MAP_SHARED))
9781 do_colour_align = 1;
9782
9783+#ifdef CONFIG_PAX_RANDMMAP
9784+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9785+#endif
9786+
9787 if (addr) {
9788 if (do_colour_align)
9789 addr = COLOUR_ALIGN(addr, pgoff);
9790@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9791 addr = PAGE_ALIGN(addr);
9792
9793 vma = find_vma(mm, addr);
9794- if (TASK_SIZE - len >= addr &&
9795- (!vma || addr + len <= vma->vm_start))
9796+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9797 return addr;
9798 }
9799
9800 info.flags = 0;
9801 info.length = len;
9802- info.low_limit = TASK_UNMAPPED_BASE;
9803+ info.low_limit = mm->mmap_base;
9804 info.high_limit = TASK_SIZE;
9805 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9806 info.align_offset = pgoff << PAGE_SHIFT;
9807@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9808 struct mm_struct *mm = current->mm;
9809 unsigned long addr = addr0;
9810 int do_colour_align;
9811+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9812 struct vm_unmapped_area_info info;
9813
9814 if (flags & MAP_FIXED) {
9815@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9816 if (filp || (flags & MAP_SHARED))
9817 do_colour_align = 1;
9818
9819+#ifdef CONFIG_PAX_RANDMMAP
9820+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9821+#endif
9822+
9823 /* requesting a specific address */
9824 if (addr) {
9825 if (do_colour_align)
9826@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9827 addr = PAGE_ALIGN(addr);
9828
9829 vma = find_vma(mm, addr);
9830- if (TASK_SIZE - len >= addr &&
9831- (!vma || addr + len <= vma->vm_start))
9832+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9833 return addr;
9834 }
9835
9836@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9837 VM_BUG_ON(addr != -ENOMEM);
9838 info.flags = 0;
9839 info.low_limit = TASK_UNMAPPED_BASE;
9840+
9841+#ifdef CONFIG_PAX_RANDMMAP
9842+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9843+ info.low_limit += mm->delta_mmap;
9844+#endif
9845+
9846 info.high_limit = TASK_SIZE;
9847 addr = vm_unmapped_area(&info);
9848 }
9849diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9850index 4082749..fd97781 100644
9851--- a/arch/sparc/include/asm/atomic_64.h
9852+++ b/arch/sparc/include/asm/atomic_64.h
9853@@ -15,18 +15,38 @@
9854 #define ATOMIC64_INIT(i) { (i) }
9855
9856 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9857+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9858+{
9859+ return ACCESS_ONCE(v->counter);
9860+}
9861 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9862+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9863+{
9864+ return ACCESS_ONCE(v->counter);
9865+}
9866
9867 #define atomic_set(v, i) (((v)->counter) = i)
9868+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9869+{
9870+ v->counter = i;
9871+}
9872 #define atomic64_set(v, i) (((v)->counter) = i)
9873+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9874+{
9875+ v->counter = i;
9876+}
9877
9878-#define ATOMIC_OP(op) \
9879-void atomic_##op(int, atomic_t *); \
9880-void atomic64_##op(long, atomic64_t *);
9881+#define __ATOMIC_OP(op, suffix) \
9882+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9883+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9884
9885-#define ATOMIC_OP_RETURN(op) \
9886-int atomic_##op##_return(int, atomic_t *); \
9887-long atomic64_##op##_return(long, atomic64_t *);
9888+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9889+
9890+#define __ATOMIC_OP_RETURN(op, suffix) \
9891+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9892+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9893+
9894+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9895
9896 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9897
9898@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9899
9900 #undef ATOMIC_OPS
9901 #undef ATOMIC_OP_RETURN
9902+#undef __ATOMIC_OP_RETURN
9903 #undef ATOMIC_OP
9904+#undef __ATOMIC_OP
9905
9906 #define atomic_dec_return(v) atomic_sub_return(1, v)
9907 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9908
9909 #define atomic_inc_return(v) atomic_add_return(1, v)
9910+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9911+{
9912+ return atomic_add_return_unchecked(1, v);
9913+}
9914 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9915+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9916+{
9917+ return atomic64_add_return_unchecked(1, v);
9918+}
9919
9920 /*
9921 * atomic_inc_and_test - increment and test
9922@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9923 * other cases.
9924 */
9925 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9926+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9927+{
9928+ return atomic_inc_return_unchecked(v) == 0;
9929+}
9930 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9931
9932 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9933@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9934 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9935
9936 #define atomic_inc(v) atomic_add(1, v)
9937+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9938+{
9939+ atomic_add_unchecked(1, v);
9940+}
9941 #define atomic64_inc(v) atomic64_add(1, v)
9942+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9943+{
9944+ atomic64_add_unchecked(1, v);
9945+}
9946
9947 #define atomic_dec(v) atomic_sub(1, v)
9948+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9949+{
9950+ atomic_sub_unchecked(1, v);
9951+}
9952 #define atomic64_dec(v) atomic64_sub(1, v)
9953+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9954+{
9955+ atomic64_sub_unchecked(1, v);
9956+}
9957
9958 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9959 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9960
9961 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9962+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9963+{
9964+ return cmpxchg(&v->counter, old, new);
9965+}
9966 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9967+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9968+{
9969+ return xchg(&v->counter, new);
9970+}
9971
9972 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9973 {
9974- int c, old;
9975+ int c, old, new;
9976 c = atomic_read(v);
9977 for (;;) {
9978- if (unlikely(c == (u)))
9979+ if (unlikely(c == u))
9980 break;
9981- old = atomic_cmpxchg((v), c, c + (a));
9982+
9983+ asm volatile("addcc %2, %0, %0\n"
9984+
9985+#ifdef CONFIG_PAX_REFCOUNT
9986+ "tvs %%icc, 6\n"
9987+#endif
9988+
9989+ : "=r" (new)
9990+ : "0" (c), "ir" (a)
9991+ : "cc");
9992+
9993+ old = atomic_cmpxchg(v, c, new);
9994 if (likely(old == c))
9995 break;
9996 c = old;
9997@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9998 #define atomic64_cmpxchg(v, o, n) \
9999 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10000 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10001+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10002+{
10003+ return xchg(&v->counter, new);
10004+}
10005
10006 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10007 {
10008- long c, old;
10009+ long c, old, new;
10010 c = atomic64_read(v);
10011 for (;;) {
10012- if (unlikely(c == (u)))
10013+ if (unlikely(c == u))
10014 break;
10015- old = atomic64_cmpxchg((v), c, c + (a));
10016+
10017+ asm volatile("addcc %2, %0, %0\n"
10018+
10019+#ifdef CONFIG_PAX_REFCOUNT
10020+ "tvs %%xcc, 6\n"
10021+#endif
10022+
10023+ : "=r" (new)
10024+ : "0" (c), "ir" (a)
10025+ : "cc");
10026+
10027+ old = atomic64_cmpxchg(v, c, new);
10028 if (likely(old == c))
10029 break;
10030 c = old;
10031 }
10032- return c != (u);
10033+ return c != u;
10034 }
10035
10036 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10037diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10038index 7664894..45a974b 100644
10039--- a/arch/sparc/include/asm/barrier_64.h
10040+++ b/arch/sparc/include/asm/barrier_64.h
10041@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10042 do { \
10043 compiletime_assert_atomic_type(*p); \
10044 barrier(); \
10045- ACCESS_ONCE(*p) = (v); \
10046+ ACCESS_ONCE_RW(*p) = (v); \
10047 } while (0)
10048
10049 #define smp_load_acquire(p) \
10050diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10051index 5bb6991..5c2132e 100644
10052--- a/arch/sparc/include/asm/cache.h
10053+++ b/arch/sparc/include/asm/cache.h
10054@@ -7,10 +7,12 @@
10055 #ifndef _SPARC_CACHE_H
10056 #define _SPARC_CACHE_H
10057
10058+#include <linux/const.h>
10059+
10060 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10061
10062 #define L1_CACHE_SHIFT 5
10063-#define L1_CACHE_BYTES 32
10064+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10065
10066 #ifdef CONFIG_SPARC32
10067 #define SMP_CACHE_BYTES_SHIFT 5
10068diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10069index a24e41f..47677ff 100644
10070--- a/arch/sparc/include/asm/elf_32.h
10071+++ b/arch/sparc/include/asm/elf_32.h
10072@@ -114,6 +114,13 @@ typedef struct {
10073
10074 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10075
10076+#ifdef CONFIG_PAX_ASLR
10077+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10078+
10079+#define PAX_DELTA_MMAP_LEN 16
10080+#define PAX_DELTA_STACK_LEN 16
10081+#endif
10082+
10083 /* This yields a mask that user programs can use to figure out what
10084 instruction set this cpu supports. This can NOT be done in userspace
10085 on Sparc. */
10086diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10087index 370ca1e..d4f4a98 100644
10088--- a/arch/sparc/include/asm/elf_64.h
10089+++ b/arch/sparc/include/asm/elf_64.h
10090@@ -189,6 +189,13 @@ typedef struct {
10091 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10092 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10093
10094+#ifdef CONFIG_PAX_ASLR
10095+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10096+
10097+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10098+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10099+#endif
10100+
10101 extern unsigned long sparc64_elf_hwcap;
10102 #define ELF_HWCAP sparc64_elf_hwcap
10103
10104diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10105index a3890da..f6a408e 100644
10106--- a/arch/sparc/include/asm/pgalloc_32.h
10107+++ b/arch/sparc/include/asm/pgalloc_32.h
10108@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10109 }
10110
10111 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10112+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10113
10114 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10115 unsigned long address)
10116diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10117index 5e31871..13469c6 100644
10118--- a/arch/sparc/include/asm/pgalloc_64.h
10119+++ b/arch/sparc/include/asm/pgalloc_64.h
10120@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10121 }
10122
10123 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10124+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10125
10126 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10127 {
10128@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10129 }
10130
10131 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10132+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10133
10134 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10135 {
10136diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10137index 59ba6f6..4518128 100644
10138--- a/arch/sparc/include/asm/pgtable.h
10139+++ b/arch/sparc/include/asm/pgtable.h
10140@@ -5,4 +5,8 @@
10141 #else
10142 #include <asm/pgtable_32.h>
10143 #endif
10144+
10145+#define ktla_ktva(addr) (addr)
10146+#define ktva_ktla(addr) (addr)
10147+
10148 #endif
10149diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10150index f06b36a..bca3189 100644
10151--- a/arch/sparc/include/asm/pgtable_32.h
10152+++ b/arch/sparc/include/asm/pgtable_32.h
10153@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10154 #define PAGE_SHARED SRMMU_PAGE_SHARED
10155 #define PAGE_COPY SRMMU_PAGE_COPY
10156 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10157+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10158+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10159+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10160 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10161
10162 /* Top-level page directory - dummy used by init-mm.
10163@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10164
10165 /* xwr */
10166 #define __P000 PAGE_NONE
10167-#define __P001 PAGE_READONLY
10168-#define __P010 PAGE_COPY
10169-#define __P011 PAGE_COPY
10170+#define __P001 PAGE_READONLY_NOEXEC
10171+#define __P010 PAGE_COPY_NOEXEC
10172+#define __P011 PAGE_COPY_NOEXEC
10173 #define __P100 PAGE_READONLY
10174 #define __P101 PAGE_READONLY
10175 #define __P110 PAGE_COPY
10176 #define __P111 PAGE_COPY
10177
10178 #define __S000 PAGE_NONE
10179-#define __S001 PAGE_READONLY
10180-#define __S010 PAGE_SHARED
10181-#define __S011 PAGE_SHARED
10182+#define __S001 PAGE_READONLY_NOEXEC
10183+#define __S010 PAGE_SHARED_NOEXEC
10184+#define __S011 PAGE_SHARED_NOEXEC
10185 #define __S100 PAGE_READONLY
10186 #define __S101 PAGE_READONLY
10187 #define __S110 PAGE_SHARED
10188diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10189index ae51a11..eadfd03 100644
10190--- a/arch/sparc/include/asm/pgtsrmmu.h
10191+++ b/arch/sparc/include/asm/pgtsrmmu.h
10192@@ -111,6 +111,11 @@
10193 SRMMU_EXEC | SRMMU_REF)
10194 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10195 SRMMU_EXEC | SRMMU_REF)
10196+
10197+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10198+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10199+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10200+
10201 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10202 SRMMU_DIRTY | SRMMU_REF)
10203
10204diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10205index 29d64b1..4272fe8 100644
10206--- a/arch/sparc/include/asm/setup.h
10207+++ b/arch/sparc/include/asm/setup.h
10208@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10209 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10210
10211 /* init_64.c */
10212-extern atomic_t dcpage_flushes;
10213-extern atomic_t dcpage_flushes_xcall;
10214+extern atomic_unchecked_t dcpage_flushes;
10215+extern atomic_unchecked_t dcpage_flushes_xcall;
10216
10217 extern int sysctl_tsb_ratio;
10218 #endif
10219diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10220index 9689176..63c18ea 100644
10221--- a/arch/sparc/include/asm/spinlock_64.h
10222+++ b/arch/sparc/include/asm/spinlock_64.h
10223@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10224
10225 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10226
10227-static void inline arch_read_lock(arch_rwlock_t *lock)
10228+static inline void arch_read_lock(arch_rwlock_t *lock)
10229 {
10230 unsigned long tmp1, tmp2;
10231
10232 __asm__ __volatile__ (
10233 "1: ldsw [%2], %0\n"
10234 " brlz,pn %0, 2f\n"
10235-"4: add %0, 1, %1\n"
10236+"4: addcc %0, 1, %1\n"
10237+
10238+#ifdef CONFIG_PAX_REFCOUNT
10239+" tvs %%icc, 6\n"
10240+#endif
10241+
10242 " cas [%2], %0, %1\n"
10243 " cmp %0, %1\n"
10244 " bne,pn %%icc, 1b\n"
10245@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10246 " .previous"
10247 : "=&r" (tmp1), "=&r" (tmp2)
10248 : "r" (lock)
10249- : "memory");
10250+ : "memory", "cc");
10251 }
10252
10253-static int inline arch_read_trylock(arch_rwlock_t *lock)
10254+static inline int arch_read_trylock(arch_rwlock_t *lock)
10255 {
10256 int tmp1, tmp2;
10257
10258@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10259 "1: ldsw [%2], %0\n"
10260 " brlz,a,pn %0, 2f\n"
10261 " mov 0, %0\n"
10262-" add %0, 1, %1\n"
10263+" addcc %0, 1, %1\n"
10264+
10265+#ifdef CONFIG_PAX_REFCOUNT
10266+" tvs %%icc, 6\n"
10267+#endif
10268+
10269 " cas [%2], %0, %1\n"
10270 " cmp %0, %1\n"
10271 " bne,pn %%icc, 1b\n"
10272@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10273 return tmp1;
10274 }
10275
10276-static void inline arch_read_unlock(arch_rwlock_t *lock)
10277+static inline void arch_read_unlock(arch_rwlock_t *lock)
10278 {
10279 unsigned long tmp1, tmp2;
10280
10281 __asm__ __volatile__(
10282 "1: lduw [%2], %0\n"
10283-" sub %0, 1, %1\n"
10284+" subcc %0, 1, %1\n"
10285+
10286+#ifdef CONFIG_PAX_REFCOUNT
10287+" tvs %%icc, 6\n"
10288+#endif
10289+
10290 " cas [%2], %0, %1\n"
10291 " cmp %0, %1\n"
10292 " bne,pn %%xcc, 1b\n"
10293@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10294 : "memory");
10295 }
10296
10297-static void inline arch_write_lock(arch_rwlock_t *lock)
10298+static inline void arch_write_lock(arch_rwlock_t *lock)
10299 {
10300 unsigned long mask, tmp1, tmp2;
10301
10302@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10303 : "memory");
10304 }
10305
10306-static void inline arch_write_unlock(arch_rwlock_t *lock)
10307+static inline void arch_write_unlock(arch_rwlock_t *lock)
10308 {
10309 __asm__ __volatile__(
10310 " stw %%g0, [%0]"
10311@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10312 : "memory");
10313 }
10314
10315-static int inline arch_write_trylock(arch_rwlock_t *lock)
10316+static inline int arch_write_trylock(arch_rwlock_t *lock)
10317 {
10318 unsigned long mask, tmp1, tmp2, result;
10319
10320diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10321index fd7bd0a..2e2fa7a 100644
10322--- a/arch/sparc/include/asm/thread_info_32.h
10323+++ b/arch/sparc/include/asm/thread_info_32.h
10324@@ -47,6 +47,7 @@ struct thread_info {
10325 struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
10326 unsigned long rwbuf_stkptrs[NSWINS];
10327 unsigned long w_saved;
10328+ unsigned long lowest_stack;
10329 };
10330
10331 /*
10332diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10333index ff45516..73001ab 100644
10334--- a/arch/sparc/include/asm/thread_info_64.h
10335+++ b/arch/sparc/include/asm/thread_info_64.h
10336@@ -61,6 +61,8 @@ struct thread_info {
10337 struct pt_regs *kern_una_regs;
10338 unsigned int kern_una_insn;
10339
10340+ unsigned long lowest_stack;
10341+
10342 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10343 __attribute__ ((aligned(64)));
10344 };
10345@@ -184,12 +186,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10346 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10347 /* flag bit 4 is available */
10348 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10349-/* flag bit 6 is available */
10350+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10351 #define TIF_32BIT 7 /* 32-bit binary */
10352 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10353 #define TIF_SECCOMP 9 /* secure computing */
10354 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10355 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10356+
10357 /* NOTE: Thread flags >= 12 should be ones we have no interest
10358 * in using in assembly, else we can't use the mask as
10359 * an immediate value in instructions such as andcc.
10360@@ -209,12 +212,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10361 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10362 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10363 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10364+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10365
10366 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10367 _TIF_DO_NOTIFY_RESUME_MASK | \
10368 _TIF_NEED_RESCHED)
10369 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10370
10371+#define _TIF_WORK_SYSCALL \
10372+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10373+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10374+
10375 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10376
10377 /*
10378diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10379index bd56c28..4b63d83 100644
10380--- a/arch/sparc/include/asm/uaccess.h
10381+++ b/arch/sparc/include/asm/uaccess.h
10382@@ -1,5 +1,6 @@
10383 #ifndef ___ASM_SPARC_UACCESS_H
10384 #define ___ASM_SPARC_UACCESS_H
10385+
10386 #if defined(__sparc__) && defined(__arch64__)
10387 #include <asm/uaccess_64.h>
10388 #else
10389diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10390index 64ee103..388aef0 100644
10391--- a/arch/sparc/include/asm/uaccess_32.h
10392+++ b/arch/sparc/include/asm/uaccess_32.h
10393@@ -47,6 +47,7 @@
10394 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10395 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10396 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
10397+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10398 #define access_ok(type, addr, size) \
10399 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10400
10401@@ -313,27 +314,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10402
10403 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10404 {
10405- if (n && __access_ok((unsigned long) to, n))
10406+ if ((long)n < 0)
10407+ return n;
10408+
10409+ if (n && __access_ok((unsigned long) to, n)) {
10410+ if (!__builtin_constant_p(n))
10411+ check_object_size(from, n, true);
10412 return __copy_user(to, (__force void __user *) from, n);
10413- else
10414+ } else
10415 return n;
10416 }
10417
10418 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10419 {
10420+ if ((long)n < 0)
10421+ return n;
10422+
10423+ if (!__builtin_constant_p(n))
10424+ check_object_size(from, n, true);
10425+
10426 return __copy_user(to, (__force void __user *) from, n);
10427 }
10428
10429 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10430 {
10431- if (n && __access_ok((unsigned long) from, n))
10432+ if ((long)n < 0)
10433+ return n;
10434+
10435+ if (n && __access_ok((unsigned long) from, n)) {
10436+ if (!__builtin_constant_p(n))
10437+ check_object_size(to, n, false);
10438 return __copy_user((__force void __user *) to, from, n);
10439- else
10440+ } else
10441 return n;
10442 }
10443
10444 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10445 {
10446+ if ((long)n < 0)
10447+ return n;
10448+
10449 return __copy_user((__force void __user *) to, from, n);
10450 }
10451
10452diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10453index a35194b..47dabc0d 100644
10454--- a/arch/sparc/include/asm/uaccess_64.h
10455+++ b/arch/sparc/include/asm/uaccess_64.h
10456@@ -10,6 +10,7 @@
10457 #include <linux/compiler.h>
10458 #include <linux/string.h>
10459 #include <linux/thread_info.h>
10460+#include <linux/kernel.h>
10461 #include <asm/asi.h>
10462 #include <asm/spitfire.h>
10463 #include <asm-generic/uaccess-unaligned.h>
10464@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10465 return 1;
10466 }
10467
10468+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10469+{
10470+ return 1;
10471+}
10472+
10473 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10474 {
10475 return 1;
10476@@ -228,8 +234,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10477 static inline unsigned long __must_check
10478 copy_from_user(void *to, const void __user *from, unsigned long size)
10479 {
10480- unsigned long ret = ___copy_from_user(to, from, size);
10481+ unsigned long ret;
10482
10483+ if ((long)size < 0 || size > INT_MAX)
10484+ return size;
10485+
10486+ if (!__builtin_constant_p(size))
10487+ check_object_size(to, size, false);
10488+
10489+ ret = ___copy_from_user(to, from, size);
10490 if (unlikely(ret))
10491 ret = copy_from_user_fixup(to, from, size);
10492
10493@@ -245,8 +258,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10494 static inline unsigned long __must_check
10495 copy_to_user(void __user *to, const void *from, unsigned long size)
10496 {
10497- unsigned long ret = ___copy_to_user(to, from, size);
10498+ unsigned long ret;
10499
10500+ if ((long)size < 0 || size > INT_MAX)
10501+ return size;
10502+
10503+ if (!__builtin_constant_p(size))
10504+ check_object_size(from, size, true);
10505+
10506+ ret = ___copy_to_user(to, from, size);
10507 if (unlikely(ret))
10508 ret = copy_to_user_fixup(to, from, size);
10509 return ret;
10510diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10511index 7cf9c6e..6206648 100644
10512--- a/arch/sparc/kernel/Makefile
10513+++ b/arch/sparc/kernel/Makefile
10514@@ -4,7 +4,7 @@
10515 #
10516
10517 asflags-y := -ansi
10518-ccflags-y := -Werror
10519+#ccflags-y := -Werror
10520
10521 extra-y := head_$(BITS).o
10522
10523diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10524index 50e7b62..79fae35 100644
10525--- a/arch/sparc/kernel/process_32.c
10526+++ b/arch/sparc/kernel/process_32.c
10527@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10528
10529 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10530 r->psr, r->pc, r->npc, r->y, print_tainted());
10531- printk("PC: <%pS>\n", (void *) r->pc);
10532+ printk("PC: <%pA>\n", (void *) r->pc);
10533 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10534 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10535 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10536 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10537 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10538 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10539- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10540+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10541
10542 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10543 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10544@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10545 rw = (struct reg_window32 *) fp;
10546 pc = rw->ins[7];
10547 printk("[%08lx : ", pc);
10548- printk("%pS ] ", (void *) pc);
10549+ printk("%pA ] ", (void *) pc);
10550 fp = rw->ins[6];
10551 } while (++count < 16);
10552 printk("\n");
10553diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10554index 46a5964..a35c62c 100644
10555--- a/arch/sparc/kernel/process_64.c
10556+++ b/arch/sparc/kernel/process_64.c
10557@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10558 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10559 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10560 if (regs->tstate & TSTATE_PRIV)
10561- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10562+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10563 }
10564
10565 void show_regs(struct pt_regs *regs)
10566@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10567
10568 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10569 regs->tpc, regs->tnpc, regs->y, print_tainted());
10570- printk("TPC: <%pS>\n", (void *) regs->tpc);
10571+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10572 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10573 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10574 regs->u_regs[3]);
10575@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10576 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10577 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10578 regs->u_regs[15]);
10579- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10580+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10581 show_regwindow(regs);
10582 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10583 }
10584@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10585 ((tp && tp->task) ? tp->task->pid : -1));
10586
10587 if (gp->tstate & TSTATE_PRIV) {
10588- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10589+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10590 (void *) gp->tpc,
10591 (void *) gp->o7,
10592 (void *) gp->i7,
10593diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10594index 79cc0d1..ec62734 100644
10595--- a/arch/sparc/kernel/prom_common.c
10596+++ b/arch/sparc/kernel/prom_common.c
10597@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10598
10599 unsigned int prom_early_allocated __initdata;
10600
10601-static struct of_pdt_ops prom_sparc_ops __initdata = {
10602+static struct of_pdt_ops prom_sparc_ops __initconst = {
10603 .nextprop = prom_common_nextprop,
10604 .getproplen = prom_getproplen,
10605 .getproperty = prom_getproperty,
10606diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10607index 9ddc492..27a5619 100644
10608--- a/arch/sparc/kernel/ptrace_64.c
10609+++ b/arch/sparc/kernel/ptrace_64.c
10610@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10611 return ret;
10612 }
10613
10614+#ifdef CONFIG_GRKERNSEC_SETXID
10615+extern void gr_delayed_cred_worker(void);
10616+#endif
10617+
10618 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10619 {
10620 int ret = 0;
10621@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10622 if (test_thread_flag(TIF_NOHZ))
10623 user_exit();
10624
10625+#ifdef CONFIG_GRKERNSEC_SETXID
10626+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10627+ gr_delayed_cred_worker();
10628+#endif
10629+
10630 if (test_thread_flag(TIF_SYSCALL_TRACE))
10631 ret = tracehook_report_syscall_entry(regs);
10632
10633@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10634 if (test_thread_flag(TIF_NOHZ))
10635 user_exit();
10636
10637+#ifdef CONFIG_GRKERNSEC_SETXID
10638+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10639+ gr_delayed_cred_worker();
10640+#endif
10641+
10642 audit_syscall_exit(regs);
10643
10644 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10645diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10646index 61139d9..c1a5f28 100644
10647--- a/arch/sparc/kernel/smp_64.c
10648+++ b/arch/sparc/kernel/smp_64.c
10649@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10650 return;
10651
10652 #ifdef CONFIG_DEBUG_DCFLUSH
10653- atomic_inc(&dcpage_flushes);
10654+ atomic_inc_unchecked(&dcpage_flushes);
10655 #endif
10656
10657 this_cpu = get_cpu();
10658@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10659 xcall_deliver(data0, __pa(pg_addr),
10660 (u64) pg_addr, cpumask_of(cpu));
10661 #ifdef CONFIG_DEBUG_DCFLUSH
10662- atomic_inc(&dcpage_flushes_xcall);
10663+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10664 #endif
10665 }
10666 }
10667@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10668 preempt_disable();
10669
10670 #ifdef CONFIG_DEBUG_DCFLUSH
10671- atomic_inc(&dcpage_flushes);
10672+ atomic_inc_unchecked(&dcpage_flushes);
10673 #endif
10674 data0 = 0;
10675 pg_addr = page_address(page);
10676@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10677 xcall_deliver(data0, __pa(pg_addr),
10678 (u64) pg_addr, cpu_online_mask);
10679 #ifdef CONFIG_DEBUG_DCFLUSH
10680- atomic_inc(&dcpage_flushes_xcall);
10681+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10682 #endif
10683 }
10684 __local_flush_dcache_page(page);
10685diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10686index 646988d..b88905f 100644
10687--- a/arch/sparc/kernel/sys_sparc_32.c
10688+++ b/arch/sparc/kernel/sys_sparc_32.c
10689@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10690 if (len > TASK_SIZE - PAGE_SIZE)
10691 return -ENOMEM;
10692 if (!addr)
10693- addr = TASK_UNMAPPED_BASE;
10694+ addr = current->mm->mmap_base;
10695
10696 info.flags = 0;
10697 info.length = len;
10698diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10699index 30e7ddb..266a3b0 100644
10700--- a/arch/sparc/kernel/sys_sparc_64.c
10701+++ b/arch/sparc/kernel/sys_sparc_64.c
10702@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10703 struct vm_area_struct * vma;
10704 unsigned long task_size = TASK_SIZE;
10705 int do_color_align;
10706+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10707 struct vm_unmapped_area_info info;
10708
10709 if (flags & MAP_FIXED) {
10710 /* We do not accept a shared mapping if it would violate
10711 * cache aliasing constraints.
10712 */
10713- if ((flags & MAP_SHARED) &&
10714+ if ((filp || (flags & MAP_SHARED)) &&
10715 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10716 return -EINVAL;
10717 return addr;
10718@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10719 if (filp || (flags & MAP_SHARED))
10720 do_color_align = 1;
10721
10722+#ifdef CONFIG_PAX_RANDMMAP
10723+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10724+#endif
10725+
10726 if (addr) {
10727 if (do_color_align)
10728 addr = COLOR_ALIGN(addr, pgoff);
10729@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10730 addr = PAGE_ALIGN(addr);
10731
10732 vma = find_vma(mm, addr);
10733- if (task_size - len >= addr &&
10734- (!vma || addr + len <= vma->vm_start))
10735+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10736 return addr;
10737 }
10738
10739 info.flags = 0;
10740 info.length = len;
10741- info.low_limit = TASK_UNMAPPED_BASE;
10742+ info.low_limit = mm->mmap_base;
10743 info.high_limit = min(task_size, VA_EXCLUDE_START);
10744 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10745 info.align_offset = pgoff << PAGE_SHIFT;
10746+ info.threadstack_offset = offset;
10747 addr = vm_unmapped_area(&info);
10748
10749 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10750 VM_BUG_ON(addr != -ENOMEM);
10751 info.low_limit = VA_EXCLUDE_END;
10752+
10753+#ifdef CONFIG_PAX_RANDMMAP
10754+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10755+ info.low_limit += mm->delta_mmap;
10756+#endif
10757+
10758 info.high_limit = task_size;
10759 addr = vm_unmapped_area(&info);
10760 }
10761@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10762 unsigned long task_size = STACK_TOP32;
10763 unsigned long addr = addr0;
10764 int do_color_align;
10765+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10766 struct vm_unmapped_area_info info;
10767
10768 /* This should only ever run for 32-bit processes. */
10769@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10770 /* We do not accept a shared mapping if it would violate
10771 * cache aliasing constraints.
10772 */
10773- if ((flags & MAP_SHARED) &&
10774+ if ((filp || (flags & MAP_SHARED)) &&
10775 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10776 return -EINVAL;
10777 return addr;
10778@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10779 if (filp || (flags & MAP_SHARED))
10780 do_color_align = 1;
10781
10782+#ifdef CONFIG_PAX_RANDMMAP
10783+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10784+#endif
10785+
10786 /* requesting a specific address */
10787 if (addr) {
10788 if (do_color_align)
10789@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10790 addr = PAGE_ALIGN(addr);
10791
10792 vma = find_vma(mm, addr);
10793- if (task_size - len >= addr &&
10794- (!vma || addr + len <= vma->vm_start))
10795+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10796 return addr;
10797 }
10798
10799@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10800 info.high_limit = mm->mmap_base;
10801 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10802 info.align_offset = pgoff << PAGE_SHIFT;
10803+ info.threadstack_offset = offset;
10804 addr = vm_unmapped_area(&info);
10805
10806 /*
10807@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10808 VM_BUG_ON(addr != -ENOMEM);
10809 info.flags = 0;
10810 info.low_limit = TASK_UNMAPPED_BASE;
10811+
10812+#ifdef CONFIG_PAX_RANDMMAP
10813+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10814+ info.low_limit += mm->delta_mmap;
10815+#endif
10816+
10817 info.high_limit = STACK_TOP32;
10818 addr = vm_unmapped_area(&info);
10819 }
10820@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10821 EXPORT_SYMBOL(get_fb_unmapped_area);
10822
10823 /* Essentially the same as PowerPC. */
10824-static unsigned long mmap_rnd(void)
10825+static unsigned long mmap_rnd(struct mm_struct *mm)
10826 {
10827 unsigned long rnd = 0UL;
10828
10829+#ifdef CONFIG_PAX_RANDMMAP
10830+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10831+#endif
10832+
10833 if (current->flags & PF_RANDOMIZE) {
10834 unsigned long val = get_random_int();
10835 if (test_thread_flag(TIF_32BIT))
10836@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10837
10838 void arch_pick_mmap_layout(struct mm_struct *mm)
10839 {
10840- unsigned long random_factor = mmap_rnd();
10841+ unsigned long random_factor = mmap_rnd(mm);
10842 unsigned long gap;
10843
10844 /*
10845@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10846 gap == RLIM_INFINITY ||
10847 sysctl_legacy_va_layout) {
10848 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10849+
10850+#ifdef CONFIG_PAX_RANDMMAP
10851+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10852+ mm->mmap_base += mm->delta_mmap;
10853+#endif
10854+
10855 mm->get_unmapped_area = arch_get_unmapped_area;
10856 } else {
10857 /* We know it's 32-bit */
10858@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10859 gap = (task_size / 6 * 5);
10860
10861 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10862+
10863+#ifdef CONFIG_PAX_RANDMMAP
10864+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10865+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10866+#endif
10867+
10868 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10869 }
10870 }
10871diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10872index bb00089..e0ea580 100644
10873--- a/arch/sparc/kernel/syscalls.S
10874+++ b/arch/sparc/kernel/syscalls.S
10875@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10876 #endif
10877 .align 32
10878 1: ldx [%g6 + TI_FLAGS], %l5
10879- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10880+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10881 be,pt %icc, rtrap
10882 nop
10883 call syscall_trace_leave
10884@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10885
10886 srl %i3, 0, %o3 ! IEU0
10887 srl %i2, 0, %o2 ! IEU0 Group
10888- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10889+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10890 bne,pn %icc, linux_syscall_trace32 ! CTI
10891 mov %i0, %l5 ! IEU1
10892 5: call %l7 ! CTI Group brk forced
10893@@ -218,7 +218,7 @@ linux_sparc_syscall:
10894
10895 mov %i3, %o3 ! IEU1
10896 mov %i4, %o4 ! IEU0 Group
10897- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10898+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10899 bne,pn %icc, linux_syscall_trace ! CTI Group
10900 mov %i0, %l5 ! IEU0
10901 2: call %l7 ! CTI Group brk forced
10902@@ -233,7 +233,7 @@ ret_sys_call:
10903
10904 cmp %o0, -ERESTART_RESTARTBLOCK
10905 bgeu,pn %xcc, 1f
10906- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10907+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10908 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10909
10910 2:
10911diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10912index 6fd386c5..6907d81 100644
10913--- a/arch/sparc/kernel/traps_32.c
10914+++ b/arch/sparc/kernel/traps_32.c
10915@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10916 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10917 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10918
10919+extern void gr_handle_kernel_exploit(void);
10920+
10921 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10922 {
10923 static int die_counter;
10924@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10925 count++ < 30 &&
10926 (((unsigned long) rw) >= PAGE_OFFSET) &&
10927 !(((unsigned long) rw) & 0x7)) {
10928- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10929+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10930 (void *) rw->ins[7]);
10931 rw = (struct reg_window32 *)rw->ins[6];
10932 }
10933 }
10934 printk("Instruction DUMP:");
10935 instruction_dump ((unsigned long *) regs->pc);
10936- if(regs->psr & PSR_PS)
10937+ if(regs->psr & PSR_PS) {
10938+ gr_handle_kernel_exploit();
10939 do_exit(SIGKILL);
10940+ }
10941 do_exit(SIGSEGV);
10942 }
10943
10944diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10945index 0e69974..0c15a6e 100644
10946--- a/arch/sparc/kernel/traps_64.c
10947+++ b/arch/sparc/kernel/traps_64.c
10948@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10949 i + 1,
10950 p->trapstack[i].tstate, p->trapstack[i].tpc,
10951 p->trapstack[i].tnpc, p->trapstack[i].tt);
10952- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10953+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10954 }
10955 }
10956
10957@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10958
10959 lvl -= 0x100;
10960 if (regs->tstate & TSTATE_PRIV) {
10961+
10962+#ifdef CONFIG_PAX_REFCOUNT
10963+ if (lvl == 6)
10964+ pax_report_refcount_overflow(regs);
10965+#endif
10966+
10967 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10968 die_if_kernel(buffer, regs);
10969 }
10970@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10971 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10972 {
10973 char buffer[32];
10974-
10975+
10976 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10977 0, lvl, SIGTRAP) == NOTIFY_STOP)
10978 return;
10979
10980+#ifdef CONFIG_PAX_REFCOUNT
10981+ if (lvl == 6)
10982+ pax_report_refcount_overflow(regs);
10983+#endif
10984+
10985 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10986
10987 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10988@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10989 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10990 printk("%s" "ERROR(%d): ",
10991 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10992- printk("TPC<%pS>\n", (void *) regs->tpc);
10993+ printk("TPC<%pA>\n", (void *) regs->tpc);
10994 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10995 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10996 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10997@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10998 smp_processor_id(),
10999 (type & 0x1) ? 'I' : 'D',
11000 regs->tpc);
11001- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11002+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11003 panic("Irrecoverable Cheetah+ parity error.");
11004 }
11005
11006@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11007 smp_processor_id(),
11008 (type & 0x1) ? 'I' : 'D',
11009 regs->tpc);
11010- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11011+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11012 }
11013
11014 struct sun4v_error_entry {
11015@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11016 /*0x38*/u64 reserved_5;
11017 };
11018
11019-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11020-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11021+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11022+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11023
11024 static const char *sun4v_err_type_to_str(u8 type)
11025 {
11026@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11027 }
11028
11029 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11030- int cpu, const char *pfx, atomic_t *ocnt)
11031+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11032 {
11033 u64 *raw_ptr = (u64 *) ent;
11034 u32 attrs;
11035@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11036
11037 show_regs(regs);
11038
11039- if ((cnt = atomic_read(ocnt)) != 0) {
11040- atomic_set(ocnt, 0);
11041+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11042+ atomic_set_unchecked(ocnt, 0);
11043 wmb();
11044 printk("%s: Queue overflowed %d times.\n",
11045 pfx, cnt);
11046@@ -2048,7 +2059,7 @@ out:
11047 */
11048 void sun4v_resum_overflow(struct pt_regs *regs)
11049 {
11050- atomic_inc(&sun4v_resum_oflow_cnt);
11051+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11052 }
11053
11054 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11055@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11056 /* XXX Actually even this can make not that much sense. Perhaps
11057 * XXX we should just pull the plug and panic directly from here?
11058 */
11059- atomic_inc(&sun4v_nonresum_oflow_cnt);
11060+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11061 }
11062
11063 static void sun4v_tlb_error(struct pt_regs *regs)
11064@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11065
11066 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11067 regs->tpc, tl);
11068- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11069+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11070 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11071- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11072+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11073 (void *) regs->u_regs[UREG_I7]);
11074 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11075 "pte[%lx] error[%lx]\n",
11076@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11077
11078 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11079 regs->tpc, tl);
11080- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11081+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11082 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11083- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11084+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11085 (void *) regs->u_regs[UREG_I7]);
11086 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11087 "pte[%lx] error[%lx]\n",
11088@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11089 fp = (unsigned long)sf->fp + STACK_BIAS;
11090 }
11091
11092- printk(" [%016lx] %pS\n", pc, (void *) pc);
11093+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11095 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11096 int index = tsk->curr_ret_stack;
11097 if (tsk->ret_stack && index >= graph) {
11098 pc = tsk->ret_stack[index - graph].ret;
11099- printk(" [%016lx] %pS\n", pc, (void *) pc);
11100+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11101 graph++;
11102 }
11103 }
11104@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11105 return (struct reg_window *) (fp + STACK_BIAS);
11106 }
11107
11108+extern void gr_handle_kernel_exploit(void);
11109+
11110 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11111 {
11112 static int die_counter;
11113@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11114 while (rw &&
11115 count++ < 30 &&
11116 kstack_valid(tp, (unsigned long) rw)) {
11117- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11118+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11119 (void *) rw->ins[7]);
11120
11121 rw = kernel_stack_up(rw);
11122@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11123 }
11124 if (panic_on_oops)
11125 panic("Fatal exception");
11126- if (regs->tstate & TSTATE_PRIV)
11127+ if (regs->tstate & TSTATE_PRIV) {
11128+ gr_handle_kernel_exploit();
11129 do_exit(SIGKILL);
11130+ }
11131 do_exit(SIGSEGV);
11132 }
11133 EXPORT_SYMBOL(die_if_kernel);
11134diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11135index 62098a8..547ab2c 100644
11136--- a/arch/sparc/kernel/unaligned_64.c
11137+++ b/arch/sparc/kernel/unaligned_64.c
11138@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11139 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11140
11141 if (__ratelimit(&ratelimit)) {
11142- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11143+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11144 regs->tpc, (void *) regs->tpc);
11145 }
11146 }
11147diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11148index 3269b02..64f5231 100644
11149--- a/arch/sparc/lib/Makefile
11150+++ b/arch/sparc/lib/Makefile
11151@@ -2,7 +2,7 @@
11152 #
11153
11154 asflags-y := -ansi -DST_DIV0=0x02
11155-ccflags-y := -Werror
11156+#ccflags-y := -Werror
11157
11158 lib-$(CONFIG_SPARC32) += ashrdi3.o
11159 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11160diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11161index 05dac43..76f8ed4 100644
11162--- a/arch/sparc/lib/atomic_64.S
11163+++ b/arch/sparc/lib/atomic_64.S
11164@@ -15,11 +15,22 @@
11165 * a value and does the barriers.
11166 */
11167
11168-#define ATOMIC_OP(op) \
11169-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11170+#ifdef CONFIG_PAX_REFCOUNT
11171+#define __REFCOUNT_OP(op) op##cc
11172+#define __OVERFLOW_IOP tvs %icc, 6;
11173+#define __OVERFLOW_XOP tvs %xcc, 6;
11174+#else
11175+#define __REFCOUNT_OP(op) op
11176+#define __OVERFLOW_IOP
11177+#define __OVERFLOW_XOP
11178+#endif
11179+
11180+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11181+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11182 BACKOFF_SETUP(%o2); \
11183 1: lduw [%o1], %g1; \
11184- op %g1, %o0, %g7; \
11185+ asm_op %g1, %o0, %g7; \
11186+ post_op \
11187 cas [%o1], %g1, %g7; \
11188 cmp %g1, %g7; \
11189 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11190@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11191 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11192 ENDPROC(atomic_##op); \
11193
11194-#define ATOMIC_OP_RETURN(op) \
11195-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11196+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11197+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11198+
11199+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11200+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11201 BACKOFF_SETUP(%o2); \
11202 1: lduw [%o1], %g1; \
11203- op %g1, %o0, %g7; \
11204+ asm_op %g1, %o0, %g7; \
11205+ post_op \
11206 cas [%o1], %g1, %g7; \
11207 cmp %g1, %g7; \
11208 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11209@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11210 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11211 ENDPROC(atomic_##op##_return);
11212
11213+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11214+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11215+
11216 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11217
11218 ATOMIC_OPS(add)
11219@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11220
11221 #undef ATOMIC_OPS
11222 #undef ATOMIC_OP_RETURN
11223+#undef __ATOMIC_OP_RETURN
11224 #undef ATOMIC_OP
11225+#undef __ATOMIC_OP
11226
11227-#define ATOMIC64_OP(op) \
11228-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11229+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11230+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11231 BACKOFF_SETUP(%o2); \
11232 1: ldx [%o1], %g1; \
11233- op %g1, %o0, %g7; \
11234+ asm_op %g1, %o0, %g7; \
11235+ post_op \
11236 casx [%o1], %g1, %g7; \
11237 cmp %g1, %g7; \
11238 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11239@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11240 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11241 ENDPROC(atomic64_##op); \
11242
11243-#define ATOMIC64_OP_RETURN(op) \
11244-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11245+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11246+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11247+
11248+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11249+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11250 BACKOFF_SETUP(%o2); \
11251 1: ldx [%o1], %g1; \
11252- op %g1, %o0, %g7; \
11253+ asm_op %g1, %o0, %g7; \
11254+ post_op \
11255 casx [%o1], %g1, %g7; \
11256 cmp %g1, %g7; \
11257 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11258@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11259 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11260 ENDPROC(atomic64_##op##_return);
11261
11262+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11263+i __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11264+
11265 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11266
11267 ATOMIC64_OPS(add)
11268@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11269
11270 #undef ATOMIC64_OPS
11271 #undef ATOMIC64_OP_RETURN
11272+#undef __ATOMIC64_OP_RETURN
11273 #undef ATOMIC64_OP
11274+#undef __ATOMIC64_OP
11275+#undef __OVERFLOW_XOP
11276+#undef __OVERFLOW_IOP
11277+#undef __REFCOUNT_OP
11278
11279 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11280 BACKOFF_SETUP(%o2)
11281diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11282index 1d649a9..fbc5bfc 100644
11283--- a/arch/sparc/lib/ksyms.c
11284+++ b/arch/sparc/lib/ksyms.c
11285@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11286 /* Atomic counter implementation. */
11287 #define ATOMIC_OP(op) \
11288 EXPORT_SYMBOL(atomic_##op); \
11289-EXPORT_SYMBOL(atomic64_##op);
11290+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11291+EXPORT_SYMBOL(atomic64_##op); \
11292+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11293
11294 #define ATOMIC_OP_RETURN(op) \
11295 EXPORT_SYMBOL(atomic_##op##_return); \
11296@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11297 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11298
11299 ATOMIC_OPS(add)
11300+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11301+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11302 ATOMIC_OPS(sub)
11303
11304 #undef ATOMIC_OPS
11305diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11306index 30c3ecc..736f015 100644
11307--- a/arch/sparc/mm/Makefile
11308+++ b/arch/sparc/mm/Makefile
11309@@ -2,7 +2,7 @@
11310 #
11311
11312 asflags-y := -ansi
11313-ccflags-y := -Werror
11314+#ccflags-y := -Werror
11315
11316 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11317 obj-y += fault_$(BITS).o
11318diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11319index 70d8171..274c6c0 100644
11320--- a/arch/sparc/mm/fault_32.c
11321+++ b/arch/sparc/mm/fault_32.c
11322@@ -21,6 +21,9 @@
11323 #include <linux/perf_event.h>
11324 #include <linux/interrupt.h>
11325 #include <linux/kdebug.h>
11326+#include <linux/slab.h>
11327+#include <linux/pagemap.h>
11328+#include <linux/compiler.h>
11329
11330 #include <asm/page.h>
11331 #include <asm/pgtable.h>
11332@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11333 return safe_compute_effective_address(regs, insn);
11334 }
11335
11336+#ifdef CONFIG_PAX_PAGEEXEC
11337+#ifdef CONFIG_PAX_DLRESOLVE
11338+static void pax_emuplt_close(struct vm_area_struct *vma)
11339+{
11340+ vma->vm_mm->call_dl_resolve = 0UL;
11341+}
11342+
11343+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11344+{
11345+ unsigned int *kaddr;
11346+
11347+ vmf->page = alloc_page(GFP_HIGHUSER);
11348+ if (!vmf->page)
11349+ return VM_FAULT_OOM;
11350+
11351+ kaddr = kmap(vmf->page);
11352+ memset(kaddr, 0, PAGE_SIZE);
11353+ kaddr[0] = 0x9DE3BFA8U; /* save */
11354+ flush_dcache_page(vmf->page);
11355+ kunmap(vmf->page);
11356+ return VM_FAULT_MAJOR;
11357+}
11358+
11359+static const struct vm_operations_struct pax_vm_ops = {
11360+ .close = pax_emuplt_close,
11361+ .fault = pax_emuplt_fault
11362+};
11363+
11364+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11365+{
11366+ int ret;
11367+
11368+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11369+ vma->vm_mm = current->mm;
11370+ vma->vm_start = addr;
11371+ vma->vm_end = addr + PAGE_SIZE;
11372+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11373+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11374+ vma->vm_ops = &pax_vm_ops;
11375+
11376+ ret = insert_vm_struct(current->mm, vma);
11377+ if (ret)
11378+ return ret;
11379+
11380+ ++current->mm->total_vm;
11381+ return 0;
11382+}
11383+#endif
11384+
11385+/*
11386+ * PaX: decide what to do with offenders (regs->pc = fault address)
11387+ *
11388+ * returns 1 when task should be killed
11389+ * 2 when patched PLT trampoline was detected
11390+ * 3 when unpatched PLT trampoline was detected
11391+ */
11392+static int pax_handle_fetch_fault(struct pt_regs *regs)
11393+{
11394+
11395+#ifdef CONFIG_PAX_EMUPLT
11396+ int err;
11397+
11398+ do { /* PaX: patched PLT emulation #1 */
11399+ unsigned int sethi1, sethi2, jmpl;
11400+
11401+ err = get_user(sethi1, (unsigned int *)regs->pc);
11402+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11403+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11404+
11405+ if (err)
11406+ break;
11407+
11408+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11409+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11410+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11411+ {
11412+ unsigned int addr;
11413+
11414+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11415+ addr = regs->u_regs[UREG_G1];
11416+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11417+ regs->pc = addr;
11418+ regs->npc = addr+4;
11419+ return 2;
11420+ }
11421+ } while (0);
11422+
11423+ do { /* PaX: patched PLT emulation #2 */
11424+ unsigned int ba;
11425+
11426+ err = get_user(ba, (unsigned int *)regs->pc);
11427+
11428+ if (err)
11429+ break;
11430+
11431+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11432+ unsigned int addr;
11433+
11434+ if ((ba & 0xFFC00000U) == 0x30800000U)
11435+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11436+ else
11437+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11438+ regs->pc = addr;
11439+ regs->npc = addr+4;
11440+ return 2;
11441+ }
11442+ } while (0);
11443+
11444+ do { /* PaX: patched PLT emulation #3 */
11445+ unsigned int sethi, bajmpl, nop;
11446+
11447+ err = get_user(sethi, (unsigned int *)regs->pc);
11448+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11449+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11450+
11451+ if (err)
11452+ break;
11453+
11454+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11455+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11456+ nop == 0x01000000U)
11457+ {
11458+ unsigned int addr;
11459+
11460+ addr = (sethi & 0x003FFFFFU) << 10;
11461+ regs->u_regs[UREG_G1] = addr;
11462+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11463+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11464+ else
11465+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11466+ regs->pc = addr;
11467+ regs->npc = addr+4;
11468+ return 2;
11469+ }
11470+ } while (0);
11471+
11472+ do { /* PaX: unpatched PLT emulation step 1 */
11473+ unsigned int sethi, ba, nop;
11474+
11475+ err = get_user(sethi, (unsigned int *)regs->pc);
11476+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11477+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11478+
11479+ if (err)
11480+ break;
11481+
11482+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11483+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11484+ nop == 0x01000000U)
11485+ {
11486+ unsigned int addr, save, call;
11487+
11488+ if ((ba & 0xFFC00000U) == 0x30800000U)
11489+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11490+ else
11491+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11492+
11493+ err = get_user(save, (unsigned int *)addr);
11494+ err |= get_user(call, (unsigned int *)(addr+4));
11495+ err |= get_user(nop, (unsigned int *)(addr+8));
11496+ if (err)
11497+ break;
11498+
11499+#ifdef CONFIG_PAX_DLRESOLVE
11500+ if (save == 0x9DE3BFA8U &&
11501+ (call & 0xC0000000U) == 0x40000000U &&
11502+ nop == 0x01000000U)
11503+ {
11504+ struct vm_area_struct *vma;
11505+ unsigned long call_dl_resolve;
11506+
11507+ down_read(&current->mm->mmap_sem);
11508+ call_dl_resolve = current->mm->call_dl_resolve;
11509+ up_read(&current->mm->mmap_sem);
11510+ if (likely(call_dl_resolve))
11511+ goto emulate;
11512+
11513+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11514+
11515+ down_write(&current->mm->mmap_sem);
11516+ if (current->mm->call_dl_resolve) {
11517+ call_dl_resolve = current->mm->call_dl_resolve;
11518+ up_write(&current->mm->mmap_sem);
11519+ if (vma)
11520+ kmem_cache_free(vm_area_cachep, vma);
11521+ goto emulate;
11522+ }
11523+
11524+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11525+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11526+ up_write(&current->mm->mmap_sem);
11527+ if (vma)
11528+ kmem_cache_free(vm_area_cachep, vma);
11529+ return 1;
11530+ }
11531+
11532+ if (pax_insert_vma(vma, call_dl_resolve)) {
11533+ up_write(&current->mm->mmap_sem);
11534+ kmem_cache_free(vm_area_cachep, vma);
11535+ return 1;
11536+ }
11537+
11538+ current->mm->call_dl_resolve = call_dl_resolve;
11539+ up_write(&current->mm->mmap_sem);
11540+
11541+emulate:
11542+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11543+ regs->pc = call_dl_resolve;
11544+ regs->npc = addr+4;
11545+ return 3;
11546+ }
11547+#endif
11548+
11549+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11550+ if ((save & 0xFFC00000U) == 0x05000000U &&
11551+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11552+ nop == 0x01000000U)
11553+ {
11554+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11555+ regs->u_regs[UREG_G2] = addr + 4;
11556+ addr = (save & 0x003FFFFFU) << 10;
11557+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11558+ regs->pc = addr;
11559+ regs->npc = addr+4;
11560+ return 3;
11561+ }
11562+ }
11563+ } while (0);
11564+
11565+ do { /* PaX: unpatched PLT emulation step 2 */
11566+ unsigned int save, call, nop;
11567+
11568+ err = get_user(save, (unsigned int *)(regs->pc-4));
11569+ err |= get_user(call, (unsigned int *)regs->pc);
11570+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11571+ if (err)
11572+ break;
11573+
11574+ if (save == 0x9DE3BFA8U &&
11575+ (call & 0xC0000000U) == 0x40000000U &&
11576+ nop == 0x01000000U)
11577+ {
11578+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11579+
11580+ regs->u_regs[UREG_RETPC] = regs->pc;
11581+ regs->pc = dl_resolve;
11582+ regs->npc = dl_resolve+4;
11583+ return 3;
11584+ }
11585+ } while (0);
11586+#endif
11587+
11588+ return 1;
11589+}
11590+
11591+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11592+{
11593+ unsigned long i;
11594+
11595+ printk(KERN_ERR "PAX: bytes at PC: ");
11596+ for (i = 0; i < 8; i++) {
11597+ unsigned int c;
11598+ if (get_user(c, (unsigned int *)pc+i))
11599+ printk(KERN_CONT "???????? ");
11600+ else
11601+ printk(KERN_CONT "%08x ", c);
11602+ }
11603+ printk("\n");
11604+}
11605+#endif
11606+
11607 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11608 int text_fault)
11609 {
11610@@ -226,6 +500,24 @@ good_area:
11611 if (!(vma->vm_flags & VM_WRITE))
11612 goto bad_area;
11613 } else {
11614+
11615+#ifdef CONFIG_PAX_PAGEEXEC
11616+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11617+ up_read(&mm->mmap_sem);
11618+ switch (pax_handle_fetch_fault(regs)) {
11619+
11620+#ifdef CONFIG_PAX_EMUPLT
11621+ case 2:
11622+ case 3:
11623+ return;
11624+#endif
11625+
11626+ }
11627+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11628+ do_group_exit(SIGKILL);
11629+ }
11630+#endif
11631+
11632 /* Allow reads even for write-only mappings */
11633 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11634 goto bad_area;
11635diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11636index 4798232..f76e3aa 100644
11637--- a/arch/sparc/mm/fault_64.c
11638+++ b/arch/sparc/mm/fault_64.c
11639@@ -22,6 +22,9 @@
11640 #include <linux/kdebug.h>
11641 #include <linux/percpu.h>
11642 #include <linux/context_tracking.h>
11643+#include <linux/slab.h>
11644+#include <linux/pagemap.h>
11645+#include <linux/compiler.h>
11646
11647 #include <asm/page.h>
11648 #include <asm/pgtable.h>
11649@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11650 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11651 regs->tpc);
11652 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11653- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11654+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11655 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11656 dump_stack();
11657 unhandled_fault(regs->tpc, current, regs);
11658@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11659 show_regs(regs);
11660 }
11661
11662+#ifdef CONFIG_PAX_PAGEEXEC
11663+#ifdef CONFIG_PAX_DLRESOLVE
11664+static void pax_emuplt_close(struct vm_area_struct *vma)
11665+{
11666+ vma->vm_mm->call_dl_resolve = 0UL;
11667+}
11668+
11669+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11670+{
11671+ unsigned int *kaddr;
11672+
11673+ vmf->page = alloc_page(GFP_HIGHUSER);
11674+ if (!vmf->page)
11675+ return VM_FAULT_OOM;
11676+
11677+ kaddr = kmap(vmf->page);
11678+ memset(kaddr, 0, PAGE_SIZE);
11679+ kaddr[0] = 0x9DE3BFA8U; /* save */
11680+ flush_dcache_page(vmf->page);
11681+ kunmap(vmf->page);
11682+ return VM_FAULT_MAJOR;
11683+}
11684+
11685+static const struct vm_operations_struct pax_vm_ops = {
11686+ .close = pax_emuplt_close,
11687+ .fault = pax_emuplt_fault
11688+};
11689+
11690+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11691+{
11692+ int ret;
11693+
11694+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11695+ vma->vm_mm = current->mm;
11696+ vma->vm_start = addr;
11697+ vma->vm_end = addr + PAGE_SIZE;
11698+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11699+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11700+ vma->vm_ops = &pax_vm_ops;
11701+
11702+ ret = insert_vm_struct(current->mm, vma);
11703+ if (ret)
11704+ return ret;
11705+
11706+ ++current->mm->total_vm;
11707+ return 0;
11708+}
11709+#endif
11710+
11711+/*
11712+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11713+ *
11714+ * returns 1 when task should be killed
11715+ * 2 when patched PLT trampoline was detected
11716+ * 3 when unpatched PLT trampoline was detected
11717+ */
11718+static int pax_handle_fetch_fault(struct pt_regs *regs)
11719+{
11720+
11721+#ifdef CONFIG_PAX_EMUPLT
11722+ int err;
11723+
11724+ do { /* PaX: patched PLT emulation #1 */
11725+ unsigned int sethi1, sethi2, jmpl;
11726+
11727+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11728+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11729+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11730+
11731+ if (err)
11732+ break;
11733+
11734+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11735+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11736+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11737+ {
11738+ unsigned long addr;
11739+
11740+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11741+ addr = regs->u_regs[UREG_G1];
11742+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11743+
11744+ if (test_thread_flag(TIF_32BIT))
11745+ addr &= 0xFFFFFFFFUL;
11746+
11747+ regs->tpc = addr;
11748+ regs->tnpc = addr+4;
11749+ return 2;
11750+ }
11751+ } while (0);
11752+
11753+ do { /* PaX: patched PLT emulation #2 */
11754+ unsigned int ba;
11755+
11756+ err = get_user(ba, (unsigned int *)regs->tpc);
11757+
11758+ if (err)
11759+ break;
11760+
11761+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11762+ unsigned long addr;
11763+
11764+ if ((ba & 0xFFC00000U) == 0x30800000U)
11765+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11766+ else
11767+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11768+
11769+ if (test_thread_flag(TIF_32BIT))
11770+ addr &= 0xFFFFFFFFUL;
11771+
11772+ regs->tpc = addr;
11773+ regs->tnpc = addr+4;
11774+ return 2;
11775+ }
11776+ } while (0);
11777+
11778+ do { /* PaX: patched PLT emulation #3 */
11779+ unsigned int sethi, bajmpl, nop;
11780+
11781+ err = get_user(sethi, (unsigned int *)regs->tpc);
11782+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11783+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11784+
11785+ if (err)
11786+ break;
11787+
11788+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11789+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11790+ nop == 0x01000000U)
11791+ {
11792+ unsigned long addr;
11793+
11794+ addr = (sethi & 0x003FFFFFU) << 10;
11795+ regs->u_regs[UREG_G1] = addr;
11796+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11797+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11798+ else
11799+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11800+
11801+ if (test_thread_flag(TIF_32BIT))
11802+ addr &= 0xFFFFFFFFUL;
11803+
11804+ regs->tpc = addr;
11805+ regs->tnpc = addr+4;
11806+ return 2;
11807+ }
11808+ } while (0);
11809+
11810+ do { /* PaX: patched PLT emulation #4 */
11811+ unsigned int sethi, mov1, call, mov2;
11812+
11813+ err = get_user(sethi, (unsigned int *)regs->tpc);
11814+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11815+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11816+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11817+
11818+ if (err)
11819+ break;
11820+
11821+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11822+ mov1 == 0x8210000FU &&
11823+ (call & 0xC0000000U) == 0x40000000U &&
11824+ mov2 == 0x9E100001U)
11825+ {
11826+ unsigned long addr;
11827+
11828+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11829+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11830+
11831+ if (test_thread_flag(TIF_32BIT))
11832+ addr &= 0xFFFFFFFFUL;
11833+
11834+ regs->tpc = addr;
11835+ regs->tnpc = addr+4;
11836+ return 2;
11837+ }
11838+ } while (0);
11839+
11840+ do { /* PaX: patched PLT emulation #5 */
11841+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11842+
11843+ err = get_user(sethi, (unsigned int *)regs->tpc);
11844+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11845+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11846+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11847+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11848+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11849+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11850+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11851+
11852+ if (err)
11853+ break;
11854+
11855+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11856+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11857+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11858+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11859+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11860+ sllx == 0x83287020U &&
11861+ jmpl == 0x81C04005U &&
11862+ nop == 0x01000000U)
11863+ {
11864+ unsigned long addr;
11865+
11866+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11867+ regs->u_regs[UREG_G1] <<= 32;
11868+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11869+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11870+ regs->tpc = addr;
11871+ regs->tnpc = addr+4;
11872+ return 2;
11873+ }
11874+ } while (0);
11875+
11876+ do { /* PaX: patched PLT emulation #6 */
11877+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11878+
11879+ err = get_user(sethi, (unsigned int *)regs->tpc);
11880+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11881+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11882+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11883+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11884+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11885+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11886+
11887+ if (err)
11888+ break;
11889+
11890+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11891+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11892+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11893+ sllx == 0x83287020U &&
11894+ (or & 0xFFFFE000U) == 0x8A116000U &&
11895+ jmpl == 0x81C04005U &&
11896+ nop == 0x01000000U)
11897+ {
11898+ unsigned long addr;
11899+
11900+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11901+ regs->u_regs[UREG_G1] <<= 32;
11902+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11903+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11904+ regs->tpc = addr;
11905+ regs->tnpc = addr+4;
11906+ return 2;
11907+ }
11908+ } while (0);
11909+
11910+ do { /* PaX: unpatched PLT emulation step 1 */
11911+ unsigned int sethi, ba, nop;
11912+
11913+ err = get_user(sethi, (unsigned int *)regs->tpc);
11914+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11915+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11916+
11917+ if (err)
11918+ break;
11919+
11920+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11921+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11922+ nop == 0x01000000U)
11923+ {
11924+ unsigned long addr;
11925+ unsigned int save, call;
11926+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11927+
11928+ if ((ba & 0xFFC00000U) == 0x30800000U)
11929+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11930+ else
11931+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11932+
11933+ if (test_thread_flag(TIF_32BIT))
11934+ addr &= 0xFFFFFFFFUL;
11935+
11936+ err = get_user(save, (unsigned int *)addr);
11937+ err |= get_user(call, (unsigned int *)(addr+4));
11938+ err |= get_user(nop, (unsigned int *)(addr+8));
11939+ if (err)
11940+ break;
11941+
11942+#ifdef CONFIG_PAX_DLRESOLVE
11943+ if (save == 0x9DE3BFA8U &&
11944+ (call & 0xC0000000U) == 0x40000000U &&
11945+ nop == 0x01000000U)
11946+ {
11947+ struct vm_area_struct *vma;
11948+ unsigned long call_dl_resolve;
11949+
11950+ down_read(&current->mm->mmap_sem);
11951+ call_dl_resolve = current->mm->call_dl_resolve;
11952+ up_read(&current->mm->mmap_sem);
11953+ if (likely(call_dl_resolve))
11954+ goto emulate;
11955+
11956+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11957+
11958+ down_write(&current->mm->mmap_sem);
11959+ if (current->mm->call_dl_resolve) {
11960+ call_dl_resolve = current->mm->call_dl_resolve;
11961+ up_write(&current->mm->mmap_sem);
11962+ if (vma)
11963+ kmem_cache_free(vm_area_cachep, vma);
11964+ goto emulate;
11965+ }
11966+
11967+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11968+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11969+ up_write(&current->mm->mmap_sem);
11970+ if (vma)
11971+ kmem_cache_free(vm_area_cachep, vma);
11972+ return 1;
11973+ }
11974+
11975+ if (pax_insert_vma(vma, call_dl_resolve)) {
11976+ up_write(&current->mm->mmap_sem);
11977+ kmem_cache_free(vm_area_cachep, vma);
11978+ return 1;
11979+ }
11980+
11981+ current->mm->call_dl_resolve = call_dl_resolve;
11982+ up_write(&current->mm->mmap_sem);
11983+
11984+emulate:
11985+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11986+ regs->tpc = call_dl_resolve;
11987+ regs->tnpc = addr+4;
11988+ return 3;
11989+ }
11990+#endif
11991+
11992+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11993+ if ((save & 0xFFC00000U) == 0x05000000U &&
11994+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11995+ nop == 0x01000000U)
11996+ {
11997+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11998+ regs->u_regs[UREG_G2] = addr + 4;
11999+ addr = (save & 0x003FFFFFU) << 10;
12000+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12001+
12002+ if (test_thread_flag(TIF_32BIT))
12003+ addr &= 0xFFFFFFFFUL;
12004+
12005+ regs->tpc = addr;
12006+ regs->tnpc = addr+4;
12007+ return 3;
12008+ }
12009+
12010+ /* PaX: 64-bit PLT stub */
12011+ err = get_user(sethi1, (unsigned int *)addr);
12012+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12013+ err |= get_user(or1, (unsigned int *)(addr+8));
12014+ err |= get_user(or2, (unsigned int *)(addr+12));
12015+ err |= get_user(sllx, (unsigned int *)(addr+16));
12016+ err |= get_user(add, (unsigned int *)(addr+20));
12017+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12018+ err |= get_user(nop, (unsigned int *)(addr+28));
12019+ if (err)
12020+ break;
12021+
12022+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12023+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12024+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12025+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12026+ sllx == 0x89293020U &&
12027+ add == 0x8A010005U &&
12028+ jmpl == 0x89C14000U &&
12029+ nop == 0x01000000U)
12030+ {
12031+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12032+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12033+ regs->u_regs[UREG_G4] <<= 32;
12034+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12035+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12036+ regs->u_regs[UREG_G4] = addr + 24;
12037+ addr = regs->u_regs[UREG_G5];
12038+ regs->tpc = addr;
12039+ regs->tnpc = addr+4;
12040+ return 3;
12041+ }
12042+ }
12043+ } while (0);
12044+
12045+#ifdef CONFIG_PAX_DLRESOLVE
12046+ do { /* PaX: unpatched PLT emulation step 2 */
12047+ unsigned int save, call, nop;
12048+
12049+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12050+ err |= get_user(call, (unsigned int *)regs->tpc);
12051+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12052+ if (err)
12053+ break;
12054+
12055+ if (save == 0x9DE3BFA8U &&
12056+ (call & 0xC0000000U) == 0x40000000U &&
12057+ nop == 0x01000000U)
12058+ {
12059+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12060+
12061+ if (test_thread_flag(TIF_32BIT))
12062+ dl_resolve &= 0xFFFFFFFFUL;
12063+
12064+ regs->u_regs[UREG_RETPC] = regs->tpc;
12065+ regs->tpc = dl_resolve;
12066+ regs->tnpc = dl_resolve+4;
12067+ return 3;
12068+ }
12069+ } while (0);
12070+#endif
12071+
12072+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12073+ unsigned int sethi, ba, nop;
12074+
12075+ err = get_user(sethi, (unsigned int *)regs->tpc);
12076+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12077+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12078+
12079+ if (err)
12080+ break;
12081+
12082+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12083+ (ba & 0xFFF00000U) == 0x30600000U &&
12084+ nop == 0x01000000U)
12085+ {
12086+ unsigned long addr;
12087+
12088+ addr = (sethi & 0x003FFFFFU) << 10;
12089+ regs->u_regs[UREG_G1] = addr;
12090+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12091+
12092+ if (test_thread_flag(TIF_32BIT))
12093+ addr &= 0xFFFFFFFFUL;
12094+
12095+ regs->tpc = addr;
12096+ regs->tnpc = addr+4;
12097+ return 2;
12098+ }
12099+ } while (0);
12100+
12101+#endif
12102+
12103+ return 1;
12104+}
12105+
12106+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12107+{
12108+ unsigned long i;
12109+
12110+ printk(KERN_ERR "PAX: bytes at PC: ");
12111+ for (i = 0; i < 8; i++) {
12112+ unsigned int c;
12113+ if (get_user(c, (unsigned int *)pc+i))
12114+ printk(KERN_CONT "???????? ");
12115+ else
12116+ printk(KERN_CONT "%08x ", c);
12117+ }
12118+ printk("\n");
12119+}
12120+#endif
12121+
12122 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12123 {
12124 enum ctx_state prev_state = exception_enter();
12125@@ -353,6 +816,29 @@ retry:
12126 if (!vma)
12127 goto bad_area;
12128
12129+#ifdef CONFIG_PAX_PAGEEXEC
12130+ /* PaX: detect ITLB misses on non-exec pages */
12131+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12132+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12133+ {
12134+ if (address != regs->tpc)
12135+ goto good_area;
12136+
12137+ up_read(&mm->mmap_sem);
12138+ switch (pax_handle_fetch_fault(regs)) {
12139+
12140+#ifdef CONFIG_PAX_EMUPLT
12141+ case 2:
12142+ case 3:
12143+ return;
12144+#endif
12145+
12146+ }
12147+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12148+ do_group_exit(SIGKILL);
12149+ }
12150+#endif
12151+
12152 /* Pure DTLB misses do not tell us whether the fault causing
12153 * load/store/atomic was a write or not, it only says that there
12154 * was no match. So in such a case we (carefully) read the
12155diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12156index 4242eab..9ae6360 100644
12157--- a/arch/sparc/mm/hugetlbpage.c
12158+++ b/arch/sparc/mm/hugetlbpage.c
12159@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12160 unsigned long addr,
12161 unsigned long len,
12162 unsigned long pgoff,
12163- unsigned long flags)
12164+ unsigned long flags,
12165+ unsigned long offset)
12166 {
12167+ struct mm_struct *mm = current->mm;
12168 unsigned long task_size = TASK_SIZE;
12169 struct vm_unmapped_area_info info;
12170
12171@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12172
12173 info.flags = 0;
12174 info.length = len;
12175- info.low_limit = TASK_UNMAPPED_BASE;
12176+ info.low_limit = mm->mmap_base;
12177 info.high_limit = min(task_size, VA_EXCLUDE_START);
12178 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12179 info.align_offset = 0;
12180+ info.threadstack_offset = offset;
12181 addr = vm_unmapped_area(&info);
12182
12183 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12184 VM_BUG_ON(addr != -ENOMEM);
12185 info.low_limit = VA_EXCLUDE_END;
12186+
12187+#ifdef CONFIG_PAX_RANDMMAP
12188+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12189+ info.low_limit += mm->delta_mmap;
12190+#endif
12191+
12192 info.high_limit = task_size;
12193 addr = vm_unmapped_area(&info);
12194 }
12195@@ -55,7 +64,8 @@ static unsigned long
12196 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12197 const unsigned long len,
12198 const unsigned long pgoff,
12199- const unsigned long flags)
12200+ const unsigned long flags,
12201+ const unsigned long offset)
12202 {
12203 struct mm_struct *mm = current->mm;
12204 unsigned long addr = addr0;
12205@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12206 info.high_limit = mm->mmap_base;
12207 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12208 info.align_offset = 0;
12209+ info.threadstack_offset = offset;
12210 addr = vm_unmapped_area(&info);
12211
12212 /*
12213@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12214 VM_BUG_ON(addr != -ENOMEM);
12215 info.flags = 0;
12216 info.low_limit = TASK_UNMAPPED_BASE;
12217+
12218+#ifdef CONFIG_PAX_RANDMMAP
12219+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12220+ info.low_limit += mm->delta_mmap;
12221+#endif
12222+
12223 info.high_limit = STACK_TOP32;
12224 addr = vm_unmapped_area(&info);
12225 }
12226@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12227 struct mm_struct *mm = current->mm;
12228 struct vm_area_struct *vma;
12229 unsigned long task_size = TASK_SIZE;
12230+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12231
12232 if (test_thread_flag(TIF_32BIT))
12233 task_size = STACK_TOP32;
12234@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12235 return addr;
12236 }
12237
12238+#ifdef CONFIG_PAX_RANDMMAP
12239+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12240+#endif
12241+
12242 if (addr) {
12243 addr = ALIGN(addr, HPAGE_SIZE);
12244 vma = find_vma(mm, addr);
12245- if (task_size - len >= addr &&
12246- (!vma || addr + len <= vma->vm_start))
12247+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12248 return addr;
12249 }
12250 if (mm->get_unmapped_area == arch_get_unmapped_area)
12251 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12252- pgoff, flags);
12253+ pgoff, flags, offset);
12254 else
12255 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12256- pgoff, flags);
12257+ pgoff, flags, offset);
12258 }
12259
12260 pte_t *huge_pte_alloc(struct mm_struct *mm,
12261diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12262index 4ca0d6b..e89bca1 100644
12263--- a/arch/sparc/mm/init_64.c
12264+++ b/arch/sparc/mm/init_64.c
12265@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12266 int num_kernel_image_mappings;
12267
12268 #ifdef CONFIG_DEBUG_DCFLUSH
12269-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12270+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12271 #ifdef CONFIG_SMP
12272-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12273+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12274 #endif
12275 #endif
12276
12277@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12278 {
12279 BUG_ON(tlb_type == hypervisor);
12280 #ifdef CONFIG_DEBUG_DCFLUSH
12281- atomic_inc(&dcpage_flushes);
12282+ atomic_inc_unchecked(&dcpage_flushes);
12283 #endif
12284
12285 #ifdef DCACHE_ALIASING_POSSIBLE
12286@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12287
12288 #ifdef CONFIG_DEBUG_DCFLUSH
12289 seq_printf(m, "DCPageFlushes\t: %d\n",
12290- atomic_read(&dcpage_flushes));
12291+ atomic_read_unchecked(&dcpage_flushes));
12292 #ifdef CONFIG_SMP
12293 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12294- atomic_read(&dcpage_flushes_xcall));
12295+ atomic_read_unchecked(&dcpage_flushes_xcall));
12296 #endif /* CONFIG_SMP */
12297 #endif /* CONFIG_DEBUG_DCFLUSH */
12298 }
12299diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12300index 7cca418..53fc030 100644
12301--- a/arch/tile/Kconfig
12302+++ b/arch/tile/Kconfig
12303@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12304
12305 config KEXEC
12306 bool "kexec system call"
12307+ depends on !GRKERNSEC_KMEM
12308 ---help---
12309 kexec is a system call that implements the ability to shutdown your
12310 current kernel, and to start another kernel. It is like a reboot
12311diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12312index 7b11c5f..755a026 100644
12313--- a/arch/tile/include/asm/atomic_64.h
12314+++ b/arch/tile/include/asm/atomic_64.h
12315@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12316
12317 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12318
12319+#define atomic64_read_unchecked(v) atomic64_read(v)
12320+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12321+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12322+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12323+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12324+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12325+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12326+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12327+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12328+
12329 /* Define this to indicate that cmpxchg is an efficient operation. */
12330 #define __HAVE_ARCH_CMPXCHG
12331
12332diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12333index 6160761..00cac88 100644
12334--- a/arch/tile/include/asm/cache.h
12335+++ b/arch/tile/include/asm/cache.h
12336@@ -15,11 +15,12 @@
12337 #ifndef _ASM_TILE_CACHE_H
12338 #define _ASM_TILE_CACHE_H
12339
12340+#include <linux/const.h>
12341 #include <arch/chip.h>
12342
12343 /* bytes per L1 data cache line */
12344 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12345-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12346+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12347
12348 /* bytes per L2 cache line */
12349 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
12350diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12351index f41cb53..31d3ab4 100644
12352--- a/arch/tile/include/asm/uaccess.h
12353+++ b/arch/tile/include/asm/uaccess.h
12354@@ -417,9 +417,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12355 const void __user *from,
12356 unsigned long n)
12357 {
12358- int sz = __compiletime_object_size(to);
12359+ size_t sz = __compiletime_object_size(to);
12360
12361- if (likely(sz == -1 || sz >= n))
12362+ if (likely(sz == (size_t)-1 || sz >= n))
12363 n = _copy_from_user(to, from, n);
12364 else
12365 copy_from_user_overflow();
12366diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12367index 8416240..a012fb7 100644
12368--- a/arch/tile/mm/hugetlbpage.c
12369+++ b/arch/tile/mm/hugetlbpage.c
12370@@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12371 info.high_limit = TASK_SIZE;
12372 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12373 info.align_offset = 0;
12374+ info.threadstack_offset = 0;
12375 return vm_unmapped_area(&info);
12376 }
12377
12378@@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12379 info.high_limit = current->mm->mmap_base;
12380 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12381 info.align_offset = 0;
12382+ info.threadstack_offset = 0;
12383 addr = vm_unmapped_area(&info);
12384
12385 /*
12386diff --git a/arch/um/Makefile b/arch/um/Makefile
12387index e4b1a96..16162f8 100644
12388--- a/arch/um/Makefile
12389+++ b/arch/um/Makefile
12390@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12391 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12392 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12393
12394+ifdef CONSTIFY_PLUGIN
12395+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12396+endif
12397+
12398 #This will adjust *FLAGS accordingly to the platform.
12399 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12400
12401diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12402index 19e1bdd..3665b77 100644
12403--- a/arch/um/include/asm/cache.h
12404+++ b/arch/um/include/asm/cache.h
12405@@ -1,6 +1,7 @@
12406 #ifndef __UM_CACHE_H
12407 #define __UM_CACHE_H
12408
12409+#include <linux/const.h>
12410
12411 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12412 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12413@@ -12,6 +13,6 @@
12414 # define L1_CACHE_SHIFT 5
12415 #endif
12416
12417-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12418+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12419
12420 #endif
12421diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12422index 2e0a6b1..a64d0f5 100644
12423--- a/arch/um/include/asm/kmap_types.h
12424+++ b/arch/um/include/asm/kmap_types.h
12425@@ -8,6 +8,6 @@
12426
12427 /* No more #include "asm/arch/kmap_types.h" ! */
12428
12429-#define KM_TYPE_NR 14
12430+#define KM_TYPE_NR 15
12431
12432 #endif
12433diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12434index 71c5d13..4c7b9f1 100644
12435--- a/arch/um/include/asm/page.h
12436+++ b/arch/um/include/asm/page.h
12437@@ -14,6 +14,9 @@
12438 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12439 #define PAGE_MASK (~(PAGE_SIZE-1))
12440
12441+#define ktla_ktva(addr) (addr)
12442+#define ktva_ktla(addr) (addr)
12443+
12444 #ifndef __ASSEMBLY__
12445
12446 struct page;
12447diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12448index 2b4274e..754fe06 100644
12449--- a/arch/um/include/asm/pgtable-3level.h
12450+++ b/arch/um/include/asm/pgtable-3level.h
12451@@ -58,6 +58,7 @@
12452 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12453 #define pud_populate(mm, pud, pmd) \
12454 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12455+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12456
12457 #ifdef CONFIG_64BIT
12458 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12459diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12460index f17bca8..48adb87 100644
12461--- a/arch/um/kernel/process.c
12462+++ b/arch/um/kernel/process.c
12463@@ -356,22 +356,6 @@ int singlestepping(void * t)
12464 return 2;
12465 }
12466
12467-/*
12468- * Only x86 and x86_64 have an arch_align_stack().
12469- * All other arches have "#define arch_align_stack(x) (x)"
12470- * in their asm/exec.h
12471- * As this is included in UML from asm-um/system-generic.h,
12472- * we can use it to behave as the subarch does.
12473- */
12474-#ifndef arch_align_stack
12475-unsigned long arch_align_stack(unsigned long sp)
12476-{
12477- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12478- sp -= get_random_int() % 8192;
12479- return sp & ~0xf;
12480-}
12481-#endif
12482-
12483 unsigned long get_wchan(struct task_struct *p)
12484 {
12485 unsigned long stack_page, sp, ip;
12486diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12487index ad8f795..2c7eec6 100644
12488--- a/arch/unicore32/include/asm/cache.h
12489+++ b/arch/unicore32/include/asm/cache.h
12490@@ -12,8 +12,10 @@
12491 #ifndef __UNICORE_CACHE_H__
12492 #define __UNICORE_CACHE_H__
12493
12494-#define L1_CACHE_SHIFT (5)
12495-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12496+#include <linux/const.h>
12497+
12498+#define L1_CACHE_SHIFT 5
12499+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12500
12501 /*
12502 * Memory returned by kmalloc() may be used for DMA, so we must make
12503diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12504index b7d31ca..9481ec5 100644
12505--- a/arch/x86/Kconfig
12506+++ b/arch/x86/Kconfig
12507@@ -132,7 +132,7 @@ config X86
12508 select RTC_LIB
12509 select HAVE_DEBUG_STACKOVERFLOW
12510 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12511- select HAVE_CC_STACKPROTECTOR
12512+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12513 select GENERIC_CPU_AUTOPROBE
12514 select HAVE_ARCH_AUDITSYSCALL
12515 select ARCH_SUPPORTS_ATOMIC_RMW
12516@@ -266,7 +266,7 @@ config X86_HT
12517
12518 config X86_32_LAZY_GS
12519 def_bool y
12520- depends on X86_32 && !CC_STACKPROTECTOR
12521+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12522
12523 config ARCH_HWEIGHT_CFLAGS
12524 string
12525@@ -632,6 +632,7 @@ config SCHED_OMIT_FRAME_POINTER
12526
12527 menuconfig HYPERVISOR_GUEST
12528 bool "Linux guest support"
12529+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12530 ---help---
12531 Say Y here to enable options for running Linux under various hyper-
12532 visors. This option enables basic hypervisor detection and platform
12533@@ -1013,6 +1014,7 @@ config VM86
12534
12535 config X86_16BIT
12536 bool "Enable support for 16-bit segments" if EXPERT
12537+ depends on !GRKERNSEC
12538 default y
12539 ---help---
12540 This option is required by programs like Wine to run 16-bit
12541@@ -1186,6 +1188,7 @@ choice
12542
12543 config NOHIGHMEM
12544 bool "off"
12545+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12546 ---help---
12547 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12548 However, the address space of 32-bit x86 processors is only 4
12549@@ -1222,6 +1225,7 @@ config NOHIGHMEM
12550
12551 config HIGHMEM4G
12552 bool "4GB"
12553+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12554 ---help---
12555 Select this if you have a 32-bit processor and between 1 and 4
12556 gigabytes of physical RAM.
12557@@ -1274,7 +1278,7 @@ config PAGE_OFFSET
12558 hex
12559 default 0xB0000000 if VMSPLIT_3G_OPT
12560 default 0x80000000 if VMSPLIT_2G
12561- default 0x78000000 if VMSPLIT_2G_OPT
12562+ default 0x70000000 if VMSPLIT_2G_OPT
12563 default 0x40000000 if VMSPLIT_1G
12564 default 0xC0000000
12565 depends on X86_32
12566@@ -1715,6 +1719,7 @@ source kernel/Kconfig.hz
12567
12568 config KEXEC
12569 bool "kexec system call"
12570+ depends on !GRKERNSEC_KMEM
12571 ---help---
12572 kexec is a system call that implements the ability to shutdown your
12573 current kernel, and to start another kernel. It is like a reboot
12574@@ -1900,7 +1905,9 @@ config X86_NEED_RELOCS
12575
12576 config PHYSICAL_ALIGN
12577 hex "Alignment value to which kernel should be aligned"
12578- default "0x200000"
12579+ default "0x1000000"
12580+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12581+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12582 range 0x2000 0x1000000 if X86_32
12583 range 0x200000 0x1000000 if X86_64
12584 ---help---
12585@@ -1983,6 +1990,7 @@ config COMPAT_VDSO
12586 def_bool n
12587 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12588 depends on X86_32 || IA32_EMULATION
12589+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12590 ---help---
12591 Certain buggy versions of glibc will crash if they are
12592 presented with a 32-bit vDSO that is not mapped at the address
12593diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12594index 6983314..54ad7e8 100644
12595--- a/arch/x86/Kconfig.cpu
12596+++ b/arch/x86/Kconfig.cpu
12597@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12598
12599 config X86_F00F_BUG
12600 def_bool y
12601- depends on M586MMX || M586TSC || M586 || M486
12602+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12603
12604 config X86_INVD_BUG
12605 def_bool y
12606@@ -327,7 +327,7 @@ config X86_INVD_BUG
12607
12608 config X86_ALIGNMENT_16
12609 def_bool y
12610- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12611+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12612
12613 config X86_INTEL_USERCOPY
12614 def_bool y
12615@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12616 # generates cmov.
12617 config X86_CMOV
12618 def_bool y
12619- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12620+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12621
12622 config X86_MINIMUM_CPU_FAMILY
12623 int
12624diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12625index 20028da..88d5946 100644
12626--- a/arch/x86/Kconfig.debug
12627+++ b/arch/x86/Kconfig.debug
12628@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12629 config DEBUG_RODATA
12630 bool "Write protect kernel read-only data structures"
12631 default y
12632- depends on DEBUG_KERNEL
12633+ depends on DEBUG_KERNEL && BROKEN
12634 ---help---
12635 Mark the kernel read-only data as write-protected in the pagetables,
12636 in order to catch accidental (and incorrect) writes to such const
12637@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12638
12639 config DEBUG_SET_MODULE_RONX
12640 bool "Set loadable kernel module data as NX and text as RO"
12641- depends on MODULES
12642+ depends on MODULES && BROKEN
12643 ---help---
12644 This option helps catch unintended modifications to loadable
12645 kernel module's text and read-only data. It also prevents execution
12646diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12647index 5ba2d9c..41e5bb6 100644
12648--- a/arch/x86/Makefile
12649+++ b/arch/x86/Makefile
12650@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12651 # CPU-specific tuning. Anything which can be shared with UML should go here.
12652 include $(srctree)/arch/x86/Makefile_32.cpu
12653 KBUILD_CFLAGS += $(cflags-y)
12654-
12655- # temporary until string.h is fixed
12656- KBUILD_CFLAGS += -ffreestanding
12657 else
12658 BITS := 64
12659 UTS_MACHINE := x86_64
12660@@ -107,6 +104,9 @@ else
12661 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12662 endif
12663
12664+# temporary until string.h is fixed
12665+KBUILD_CFLAGS += -ffreestanding
12666+
12667 # Make sure compiler does not have buggy stack-protector support.
12668 ifdef CONFIG_CC_STACKPROTECTOR
12669 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12670@@ -181,6 +181,7 @@ archheaders:
12671 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12672
12673 archprepare:
12674+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12675 ifeq ($(CONFIG_KEXEC_FILE),y)
12676 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12677 endif
12678@@ -264,3 +265,9 @@ define archhelp
12679 echo ' FDARGS="..." arguments for the booted kernel'
12680 echo ' FDINITRD=file initrd for the booted kernel'
12681 endef
12682+
12683+define OLD_LD
12684+
12685+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12686+*** Please upgrade your binutils to 2.18 or newer
12687+endef
12688diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12689index 57bbf2f..b100fce 100644
12690--- a/arch/x86/boot/Makefile
12691+++ b/arch/x86/boot/Makefile
12692@@ -58,6 +58,9 @@ clean-files += cpustr.h
12693 # ---------------------------------------------------------------------------
12694
12695 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12696+ifdef CONSTIFY_PLUGIN
12697+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12698+endif
12699 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12700 GCOV_PROFILE := n
12701
12702diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12703index 878e4b9..20537ab 100644
12704--- a/arch/x86/boot/bitops.h
12705+++ b/arch/x86/boot/bitops.h
12706@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12707 u8 v;
12708 const u32 *p = (const u32 *)addr;
12709
12710- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12711+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12712 return v;
12713 }
12714
12715@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12716
12717 static inline void set_bit(int nr, void *addr)
12718 {
12719- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12720+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12721 }
12722
12723 #endif /* BOOT_BITOPS_H */
12724diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12725index bd49ec6..94c7f58 100644
12726--- a/arch/x86/boot/boot.h
12727+++ b/arch/x86/boot/boot.h
12728@@ -84,7 +84,7 @@ static inline void io_delay(void)
12729 static inline u16 ds(void)
12730 {
12731 u16 seg;
12732- asm("movw %%ds,%0" : "=rm" (seg));
12733+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12734 return seg;
12735 }
12736
12737diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12738index 0a291cd..9686efc 100644
12739--- a/arch/x86/boot/compressed/Makefile
12740+++ b/arch/x86/boot/compressed/Makefile
12741@@ -30,6 +30,9 @@ KBUILD_CFLAGS += $(cflags-y)
12742 KBUILD_CFLAGS += -mno-mmx -mno-sse
12743 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12744 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12745+ifdef CONSTIFY_PLUGIN
12746+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12747+endif
12748
12749 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12750 GCOV_PROFILE := n
12751diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12752index a53440e..c3dbf1e 100644
12753--- a/arch/x86/boot/compressed/efi_stub_32.S
12754+++ b/arch/x86/boot/compressed/efi_stub_32.S
12755@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12756 * parameter 2, ..., param n. To make things easy, we save the return
12757 * address of efi_call_phys in a global variable.
12758 */
12759- popl %ecx
12760- movl %ecx, saved_return_addr(%edx)
12761- /* get the function pointer into ECX*/
12762- popl %ecx
12763- movl %ecx, efi_rt_function_ptr(%edx)
12764+ popl saved_return_addr(%edx)
12765+ popl efi_rt_function_ptr(%edx)
12766
12767 /*
12768 * 3. Call the physical function.
12769 */
12770- call *%ecx
12771+ call *efi_rt_function_ptr(%edx)
12772
12773 /*
12774 * 4. Balance the stack. And because EAX contain the return value,
12775@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12776 1: popl %edx
12777 subl $1b, %edx
12778
12779- movl efi_rt_function_ptr(%edx), %ecx
12780- pushl %ecx
12781+ pushl efi_rt_function_ptr(%edx)
12782
12783 /*
12784 * 10. Push the saved return address onto the stack and return.
12785 */
12786- movl saved_return_addr(%edx), %ecx
12787- pushl %ecx
12788- ret
12789+ jmpl *saved_return_addr(%edx)
12790 ENDPROC(efi_call_phys)
12791 .previous
12792
12793diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12794index 630384a..278e788 100644
12795--- a/arch/x86/boot/compressed/efi_thunk_64.S
12796+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12797@@ -189,8 +189,8 @@ efi_gdt64:
12798 .long 0 /* Filled out by user */
12799 .word 0
12800 .quad 0x0000000000000000 /* NULL descriptor */
12801- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12802- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12803+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12804+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12805 .quad 0x0080890000000000 /* TS descriptor */
12806 .quad 0x0000000000000000 /* TS continued */
12807 efi_gdt64_end:
12808diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12809index 1d7fbbc..36ecd58 100644
12810--- a/arch/x86/boot/compressed/head_32.S
12811+++ b/arch/x86/boot/compressed/head_32.S
12812@@ -140,10 +140,10 @@ preferred_addr:
12813 addl %eax, %ebx
12814 notl %eax
12815 andl %eax, %ebx
12816- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12817+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12818 jge 1f
12819 #endif
12820- movl $LOAD_PHYSICAL_ADDR, %ebx
12821+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12822 1:
12823
12824 /* Target address to relocate to for decompression */
12825diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12826index 6b1766c..ad465c9 100644
12827--- a/arch/x86/boot/compressed/head_64.S
12828+++ b/arch/x86/boot/compressed/head_64.S
12829@@ -94,10 +94,10 @@ ENTRY(startup_32)
12830 addl %eax, %ebx
12831 notl %eax
12832 andl %eax, %ebx
12833- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12834+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12835 jge 1f
12836 #endif
12837- movl $LOAD_PHYSICAL_ADDR, %ebx
12838+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12839 1:
12840
12841 /* Target address to relocate to for decompression */
12842@@ -322,10 +322,10 @@ preferred_addr:
12843 addq %rax, %rbp
12844 notq %rax
12845 andq %rax, %rbp
12846- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12847+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12848 jge 1f
12849 #endif
12850- movq $LOAD_PHYSICAL_ADDR, %rbp
12851+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12852 1:
12853
12854 /* Target address to relocate to for decompression */
12855@@ -434,8 +434,8 @@ gdt:
12856 .long gdt
12857 .word 0
12858 .quad 0x0000000000000000 /* NULL descriptor */
12859- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12860- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12861+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12862+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12863 .quad 0x0080890000000000 /* TS descriptor */
12864 .quad 0x0000000000000000 /* TS continued */
12865 gdt_end:
12866diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12867index a950864..c710239 100644
12868--- a/arch/x86/boot/compressed/misc.c
12869+++ b/arch/x86/boot/compressed/misc.c
12870@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12871 * Calculate the delta between where vmlinux was linked to load
12872 * and where it was actually loaded.
12873 */
12874- delta = min_addr - LOAD_PHYSICAL_ADDR;
12875+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12876 if (!delta) {
12877 debug_putstr("No relocation needed... ");
12878 return;
12879@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12880 Elf32_Ehdr ehdr;
12881 Elf32_Phdr *phdrs, *phdr;
12882 #endif
12883- void *dest;
12884+ void *dest, *prev;
12885 int i;
12886
12887 memcpy(&ehdr, output, sizeof(ehdr));
12888@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12889 case PT_LOAD:
12890 #ifdef CONFIG_RELOCATABLE
12891 dest = output;
12892- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12893+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12894 #else
12895 dest = (void *)(phdr->p_paddr);
12896 #endif
12897 memcpy(dest,
12898 output + phdr->p_offset,
12899 phdr->p_filesz);
12900+ if (i)
12901+ memset(prev, 0xff, dest - prev);
12902+ prev = dest + phdr->p_filesz;
12903 break;
12904 default: /* Ignore other PT_* */ break;
12905 }
12906@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12907 error("Destination address too large");
12908 #endif
12909 #ifndef CONFIG_RELOCATABLE
12910- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12911+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12912 error("Wrong destination address");
12913 #endif
12914
12915diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12916index 1fd7d57..0f7d096 100644
12917--- a/arch/x86/boot/cpucheck.c
12918+++ b/arch/x86/boot/cpucheck.c
12919@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12920 u32 ecx = MSR_K7_HWCR;
12921 u32 eax, edx;
12922
12923- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12924+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12925 eax &= ~(1 << 15);
12926- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12927+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12928
12929 get_cpuflags(); /* Make sure it really did something */
12930 err = check_cpuflags();
12931@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12932 u32 ecx = MSR_VIA_FCR;
12933 u32 eax, edx;
12934
12935- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12936+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12937 eax |= (1<<1)|(1<<7);
12938- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12939+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12940
12941 set_bit(X86_FEATURE_CX8, cpu.flags);
12942 err = check_cpuflags();
12943@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12944 u32 eax, edx;
12945 u32 level = 1;
12946
12947- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12948- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12949- asm("cpuid"
12950+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12951+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12952+ asm volatile("cpuid"
12953 : "+a" (level), "=d" (cpu.flags[0])
12954 : : "ecx", "ebx");
12955- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12956+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12957
12958 err = check_cpuflags();
12959 } else if (err == 0x01 &&
12960diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12961index 16ef025..91e033b 100644
12962--- a/arch/x86/boot/header.S
12963+++ b/arch/x86/boot/header.S
12964@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12965 # single linked list of
12966 # struct setup_data
12967
12968-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12969+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12970
12971 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12972+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12973+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12974+#else
12975 #define VO_INIT_SIZE (VO__end - VO__text)
12976+#endif
12977 #if ZO_INIT_SIZE > VO_INIT_SIZE
12978 #define INIT_SIZE ZO_INIT_SIZE
12979 #else
12980diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12981index db75d07..8e6d0af 100644
12982--- a/arch/x86/boot/memory.c
12983+++ b/arch/x86/boot/memory.c
12984@@ -19,7 +19,7 @@
12985
12986 static int detect_memory_e820(void)
12987 {
12988- int count = 0;
12989+ unsigned int count = 0;
12990 struct biosregs ireg, oreg;
12991 struct e820entry *desc = boot_params.e820_map;
12992 static struct e820entry buf; /* static so it is zeroed */
12993diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12994index ba3e100..6501b8f 100644
12995--- a/arch/x86/boot/video-vesa.c
12996+++ b/arch/x86/boot/video-vesa.c
12997@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
12998
12999 boot_params.screen_info.vesapm_seg = oreg.es;
13000 boot_params.screen_info.vesapm_off = oreg.di;
13001+ boot_params.screen_info.vesapm_size = oreg.cx;
13002 }
13003
13004 /*
13005diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13006index 43eda28..5ab5fdb 100644
13007--- a/arch/x86/boot/video.c
13008+++ b/arch/x86/boot/video.c
13009@@ -96,7 +96,7 @@ static void store_mode_params(void)
13010 static unsigned int get_entry(void)
13011 {
13012 char entry_buf[4];
13013- int i, len = 0;
13014+ unsigned int i, len = 0;
13015 int key;
13016 unsigned int v;
13017
13018diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13019index 9105655..41779c1 100644
13020--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13021+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13022@@ -8,6 +8,8 @@
13023 * including this sentence is retained in full.
13024 */
13025
13026+#include <asm/alternative-asm.h>
13027+
13028 .extern crypto_ft_tab
13029 .extern crypto_it_tab
13030 .extern crypto_fl_tab
13031@@ -70,6 +72,8 @@
13032 je B192; \
13033 leaq 32(r9),r9;
13034
13035+#define ret pax_force_retaddr; ret
13036+
13037 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13038 movq r1,r2; \
13039 movq r3,r4; \
13040diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13041index 6bd2c6c..368c93e 100644
13042--- a/arch/x86/crypto/aesni-intel_asm.S
13043+++ b/arch/x86/crypto/aesni-intel_asm.S
13044@@ -31,6 +31,7 @@
13045
13046 #include <linux/linkage.h>
13047 #include <asm/inst.h>
13048+#include <asm/alternative-asm.h>
13049
13050 /*
13051 * The following macros are used to move an (un)aligned 16 byte value to/from
13052@@ -217,7 +218,7 @@ enc: .octa 0x2
13053 * num_initial_blocks = b mod 4
13054 * encrypt the initial num_initial_blocks blocks and apply ghash on
13055 * the ciphertext
13056-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13057+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13058 * are clobbered
13059 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13060 */
13061@@ -227,8 +228,8 @@ enc: .octa 0x2
13062 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13063 MOVADQ SHUF_MASK(%rip), %xmm14
13064 mov arg7, %r10 # %r10 = AAD
13065- mov arg8, %r12 # %r12 = aadLen
13066- mov %r12, %r11
13067+ mov arg8, %r15 # %r15 = aadLen
13068+ mov %r15, %r11
13069 pxor %xmm\i, %xmm\i
13070
13071 _get_AAD_loop\num_initial_blocks\operation:
13072@@ -237,17 +238,17 @@ _get_AAD_loop\num_initial_blocks\operation:
13073 psrldq $4, %xmm\i
13074 pxor \TMP1, %xmm\i
13075 add $4, %r10
13076- sub $4, %r12
13077+ sub $4, %r15
13078 jne _get_AAD_loop\num_initial_blocks\operation
13079
13080 cmp $16, %r11
13081 je _get_AAD_loop2_done\num_initial_blocks\operation
13082
13083- mov $16, %r12
13084+ mov $16, %r15
13085 _get_AAD_loop2\num_initial_blocks\operation:
13086 psrldq $4, %xmm\i
13087- sub $4, %r12
13088- cmp %r11, %r12
13089+ sub $4, %r15
13090+ cmp %r11, %r15
13091 jne _get_AAD_loop2\num_initial_blocks\operation
13092
13093 _get_AAD_loop2_done\num_initial_blocks\operation:
13094@@ -442,7 +443,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13095 * num_initial_blocks = b mod 4
13096 * encrypt the initial num_initial_blocks blocks and apply ghash on
13097 * the ciphertext
13098-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13099+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13100 * are clobbered
13101 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13102 */
13103@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13104 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13105 MOVADQ SHUF_MASK(%rip), %xmm14
13106 mov arg7, %r10 # %r10 = AAD
13107- mov arg8, %r12 # %r12 = aadLen
13108- mov %r12, %r11
13109+ mov arg8, %r15 # %r15 = aadLen
13110+ mov %r15, %r11
13111 pxor %xmm\i, %xmm\i
13112 _get_AAD_loop\num_initial_blocks\operation:
13113 movd (%r10), \TMP1
13114@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13115 psrldq $4, %xmm\i
13116 pxor \TMP1, %xmm\i
13117 add $4, %r10
13118- sub $4, %r12
13119+ sub $4, %r15
13120 jne _get_AAD_loop\num_initial_blocks\operation
13121 cmp $16, %r11
13122 je _get_AAD_loop2_done\num_initial_blocks\operation
13123- mov $16, %r12
13124+ mov $16, %r15
13125 _get_AAD_loop2\num_initial_blocks\operation:
13126 psrldq $4, %xmm\i
13127- sub $4, %r12
13128- cmp %r11, %r12
13129+ sub $4, %r15
13130+ cmp %r11, %r15
13131 jne _get_AAD_loop2\num_initial_blocks\operation
13132 _get_AAD_loop2_done\num_initial_blocks\operation:
13133 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
13134@@ -1280,7 +1281,7 @@ _esb_loop_\@:
13135 *
13136 *****************************************************************************/
13137 ENTRY(aesni_gcm_dec)
13138- push %r12
13139+ push %r15
13140 push %r13
13141 push %r14
13142 mov %rsp, %r14
13143@@ -1290,8 +1291,8 @@ ENTRY(aesni_gcm_dec)
13144 */
13145 sub $VARIABLE_OFFSET, %rsp
13146 and $~63, %rsp # align rsp to 64 bytes
13147- mov %arg6, %r12
13148- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13149+ mov %arg6, %r15
13150+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13151 movdqa SHUF_MASK(%rip), %xmm2
13152 PSHUFB_XMM %xmm2, %xmm13
13153
13154@@ -1319,10 +1320,10 @@ ENTRY(aesni_gcm_dec)
13155 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13156 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13157 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13158- mov %r13, %r12
13159- and $(3<<4), %r12
13160+ mov %r13, %r15
13161+ and $(3<<4), %r15
13162 jz _initial_num_blocks_is_0_decrypt
13163- cmp $(2<<4), %r12
13164+ cmp $(2<<4), %r15
13165 jb _initial_num_blocks_is_1_decrypt
13166 je _initial_num_blocks_is_2_decrypt
13167 _initial_num_blocks_is_3_decrypt:
13168@@ -1372,16 +1373,16 @@ _zero_cipher_left_decrypt:
13169 sub $16, %r11
13170 add %r13, %r11
13171 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13172- lea SHIFT_MASK+16(%rip), %r12
13173- sub %r13, %r12
13174+ lea SHIFT_MASK+16(%rip), %r15
13175+ sub %r13, %r15
13176 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13177 # (%r13 is the number of bytes in plaintext mod 16)
13178- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13179+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13180 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13181
13182 movdqa %xmm1, %xmm2
13183 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13184- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13185+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13186 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13187 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13188 pand %xmm1, %xmm2
13189@@ -1410,9 +1411,9 @@ _less_than_8_bytes_left_decrypt:
13190 sub $1, %r13
13191 jne _less_than_8_bytes_left_decrypt
13192 _multiple_of_16_bytes_decrypt:
13193- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13194- shl $3, %r12 # convert into number of bits
13195- movd %r12d, %xmm15 # len(A) in %xmm15
13196+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
13197+ shl $3, %r15 # convert into number of bits
13198+ movd %r15d, %xmm15 # len(A) in %xmm15
13199 shl $3, %arg4 # len(C) in bits (*128)
13200 MOVQ_R64_XMM %arg4, %xmm1
13201 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13202@@ -1451,7 +1452,8 @@ _return_T_done_decrypt:
13203 mov %r14, %rsp
13204 pop %r14
13205 pop %r13
13206- pop %r12
13207+ pop %r15
13208+ pax_force_retaddr
13209 ret
13210 ENDPROC(aesni_gcm_dec)
13211
13212@@ -1540,7 +1542,7 @@ ENDPROC(aesni_gcm_dec)
13213 * poly = x^128 + x^127 + x^126 + x^121 + 1
13214 ***************************************************************************/
13215 ENTRY(aesni_gcm_enc)
13216- push %r12
13217+ push %r15
13218 push %r13
13219 push %r14
13220 mov %rsp, %r14
13221@@ -1550,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
13222 #
13223 sub $VARIABLE_OFFSET, %rsp
13224 and $~63, %rsp
13225- mov %arg6, %r12
13226- movdqu (%r12), %xmm13
13227+ mov %arg6, %r15
13228+ movdqu (%r15), %xmm13
13229 movdqa SHUF_MASK(%rip), %xmm2
13230 PSHUFB_XMM %xmm2, %xmm13
13231
13232@@ -1575,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
13233 movdqa %xmm13, HashKey(%rsp)
13234 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13235 and $-16, %r13
13236- mov %r13, %r12
13237+ mov %r13, %r15
13238
13239 # Encrypt first few blocks
13240
13241- and $(3<<4), %r12
13242+ and $(3<<4), %r15
13243 jz _initial_num_blocks_is_0_encrypt
13244- cmp $(2<<4), %r12
13245+ cmp $(2<<4), %r15
13246 jb _initial_num_blocks_is_1_encrypt
13247 je _initial_num_blocks_is_2_encrypt
13248 _initial_num_blocks_is_3_encrypt:
13249@@ -1634,14 +1636,14 @@ _zero_cipher_left_encrypt:
13250 sub $16, %r11
13251 add %r13, %r11
13252 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13253- lea SHIFT_MASK+16(%rip), %r12
13254- sub %r13, %r12
13255+ lea SHIFT_MASK+16(%rip), %r15
13256+ sub %r13, %r15
13257 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13258 # (%r13 is the number of bytes in plaintext mod 16)
13259- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13260+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13261 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13262 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13263- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13264+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13265 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13266 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13267 movdqa SHUF_MASK(%rip), %xmm10
13268@@ -1674,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
13269 sub $1, %r13
13270 jne _less_than_8_bytes_left_encrypt
13271 _multiple_of_16_bytes_encrypt:
13272- mov arg8, %r12 # %r12 = addLen (number of bytes)
13273- shl $3, %r12
13274- movd %r12d, %xmm15 # len(A) in %xmm15
13275+ mov arg8, %r15 # %r15 = addLen (number of bytes)
13276+ shl $3, %r15
13277+ movd %r15d, %xmm15 # len(A) in %xmm15
13278 shl $3, %arg4 # len(C) in bits (*128)
13279 MOVQ_R64_XMM %arg4, %xmm1
13280 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13281@@ -1715,7 +1717,8 @@ _return_T_done_encrypt:
13282 mov %r14, %rsp
13283 pop %r14
13284 pop %r13
13285- pop %r12
13286+ pop %r15
13287+ pax_force_retaddr
13288 ret
13289 ENDPROC(aesni_gcm_enc)
13290
13291@@ -1733,6 +1736,7 @@ _key_expansion_256a:
13292 pxor %xmm1, %xmm0
13293 movaps %xmm0, (TKEYP)
13294 add $0x10, TKEYP
13295+ pax_force_retaddr
13296 ret
13297 ENDPROC(_key_expansion_128)
13298 ENDPROC(_key_expansion_256a)
13299@@ -1759,6 +1763,7 @@ _key_expansion_192a:
13300 shufps $0b01001110, %xmm2, %xmm1
13301 movaps %xmm1, 0x10(TKEYP)
13302 add $0x20, TKEYP
13303+ pax_force_retaddr
13304 ret
13305 ENDPROC(_key_expansion_192a)
13306
13307@@ -1779,6 +1784,7 @@ _key_expansion_192b:
13308
13309 movaps %xmm0, (TKEYP)
13310 add $0x10, TKEYP
13311+ pax_force_retaddr
13312 ret
13313 ENDPROC(_key_expansion_192b)
13314
13315@@ -1792,6 +1798,7 @@ _key_expansion_256b:
13316 pxor %xmm1, %xmm2
13317 movaps %xmm2, (TKEYP)
13318 add $0x10, TKEYP
13319+ pax_force_retaddr
13320 ret
13321 ENDPROC(_key_expansion_256b)
13322
13323@@ -1905,6 +1912,7 @@ ENTRY(aesni_set_key)
13324 #ifndef __x86_64__
13325 popl KEYP
13326 #endif
13327+ pax_force_retaddr
13328 ret
13329 ENDPROC(aesni_set_key)
13330
13331@@ -1927,6 +1935,7 @@ ENTRY(aesni_enc)
13332 popl KLEN
13333 popl KEYP
13334 #endif
13335+ pax_force_retaddr
13336 ret
13337 ENDPROC(aesni_enc)
13338
13339@@ -1985,6 +1994,7 @@ _aesni_enc1:
13340 AESENC KEY STATE
13341 movaps 0x70(TKEYP), KEY
13342 AESENCLAST KEY STATE
13343+ pax_force_retaddr
13344 ret
13345 ENDPROC(_aesni_enc1)
13346
13347@@ -2094,6 +2104,7 @@ _aesni_enc4:
13348 AESENCLAST KEY STATE2
13349 AESENCLAST KEY STATE3
13350 AESENCLAST KEY STATE4
13351+ pax_force_retaddr
13352 ret
13353 ENDPROC(_aesni_enc4)
13354
13355@@ -2117,6 +2128,7 @@ ENTRY(aesni_dec)
13356 popl KLEN
13357 popl KEYP
13358 #endif
13359+ pax_force_retaddr
13360 ret
13361 ENDPROC(aesni_dec)
13362
13363@@ -2175,6 +2187,7 @@ _aesni_dec1:
13364 AESDEC KEY STATE
13365 movaps 0x70(TKEYP), KEY
13366 AESDECLAST KEY STATE
13367+ pax_force_retaddr
13368 ret
13369 ENDPROC(_aesni_dec1)
13370
13371@@ -2284,6 +2297,7 @@ _aesni_dec4:
13372 AESDECLAST KEY STATE2
13373 AESDECLAST KEY STATE3
13374 AESDECLAST KEY STATE4
13375+ pax_force_retaddr
13376 ret
13377 ENDPROC(_aesni_dec4)
13378
13379@@ -2342,6 +2356,7 @@ ENTRY(aesni_ecb_enc)
13380 popl KEYP
13381 popl LEN
13382 #endif
13383+ pax_force_retaddr
13384 ret
13385 ENDPROC(aesni_ecb_enc)
13386
13387@@ -2401,6 +2416,7 @@ ENTRY(aesni_ecb_dec)
13388 popl KEYP
13389 popl LEN
13390 #endif
13391+ pax_force_retaddr
13392 ret
13393 ENDPROC(aesni_ecb_dec)
13394
13395@@ -2443,6 +2459,7 @@ ENTRY(aesni_cbc_enc)
13396 popl LEN
13397 popl IVP
13398 #endif
13399+ pax_force_retaddr
13400 ret
13401 ENDPROC(aesni_cbc_enc)
13402
13403@@ -2534,6 +2551,7 @@ ENTRY(aesni_cbc_dec)
13404 popl LEN
13405 popl IVP
13406 #endif
13407+ pax_force_retaddr
13408 ret
13409 ENDPROC(aesni_cbc_dec)
13410
13411@@ -2561,6 +2579,7 @@ _aesni_inc_init:
13412 mov $1, TCTR_LOW
13413 MOVQ_R64_XMM TCTR_LOW INC
13414 MOVQ_R64_XMM CTR TCTR_LOW
13415+ pax_force_retaddr
13416 ret
13417 ENDPROC(_aesni_inc_init)
13418
13419@@ -2590,6 +2609,7 @@ _aesni_inc:
13420 .Linc_low:
13421 movaps CTR, IV
13422 PSHUFB_XMM BSWAP_MASK IV
13423+ pax_force_retaddr
13424 ret
13425 ENDPROC(_aesni_inc)
13426
13427@@ -2651,6 +2671,7 @@ ENTRY(aesni_ctr_enc)
13428 .Lctr_enc_ret:
13429 movups IV, (IVP)
13430 .Lctr_enc_just_ret:
13431+ pax_force_retaddr
13432 ret
13433 ENDPROC(aesni_ctr_enc)
13434
13435@@ -2777,6 +2798,7 @@ ENTRY(aesni_xts_crypt8)
13436 pxor INC, STATE4
13437 movdqu STATE4, 0x70(OUTP)
13438
13439+ pax_force_retaddr
13440 ret
13441 ENDPROC(aesni_xts_crypt8)
13442
13443diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13444index 246c670..466e2d6 100644
13445--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13446+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13447@@ -21,6 +21,7 @@
13448 */
13449
13450 #include <linux/linkage.h>
13451+#include <asm/alternative-asm.h>
13452
13453 .file "blowfish-x86_64-asm.S"
13454 .text
13455@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13456 jnz .L__enc_xor;
13457
13458 write_block();
13459+ pax_force_retaddr
13460 ret;
13461 .L__enc_xor:
13462 xor_block();
13463+ pax_force_retaddr
13464 ret;
13465 ENDPROC(__blowfish_enc_blk)
13466
13467@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13468
13469 movq %r11, %rbp;
13470
13471+ pax_force_retaddr
13472 ret;
13473 ENDPROC(blowfish_dec_blk)
13474
13475@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13476
13477 popq %rbx;
13478 popq %rbp;
13479+ pax_force_retaddr
13480 ret;
13481
13482 .L__enc_xor4:
13483@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13484
13485 popq %rbx;
13486 popq %rbp;
13487+ pax_force_retaddr
13488 ret;
13489 ENDPROC(__blowfish_enc_blk_4way)
13490
13491@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13492 popq %rbx;
13493 popq %rbp;
13494
13495+ pax_force_retaddr
13496 ret;
13497 ENDPROC(blowfish_dec_blk_4way)
13498diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13499index ce71f92..1dce7ec 100644
13500--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13501+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13502@@ -16,6 +16,7 @@
13503 */
13504
13505 #include <linux/linkage.h>
13506+#include <asm/alternative-asm.h>
13507
13508 #define CAMELLIA_TABLE_BYTE_LEN 272
13509
13510@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13511 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13512 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13513 %rcx, (%r9));
13514+ pax_force_retaddr
13515 ret;
13516 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13517
13518@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13519 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13520 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13521 %rax, (%r9));
13522+ pax_force_retaddr
13523 ret;
13524 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13525
13526@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13527 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13528 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13529
13530+ pax_force_retaddr
13531 ret;
13532
13533 .align 8
13534@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13535 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13536 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13537
13538+ pax_force_retaddr
13539 ret;
13540
13541 .align 8
13542@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13543 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13544 %xmm8, %rsi);
13545
13546+ pax_force_retaddr
13547 ret;
13548 ENDPROC(camellia_ecb_enc_16way)
13549
13550@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13551 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13552 %xmm8, %rsi);
13553
13554+ pax_force_retaddr
13555 ret;
13556 ENDPROC(camellia_ecb_dec_16way)
13557
13558@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13559 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13560 %xmm8, %rsi);
13561
13562+ pax_force_retaddr
13563 ret;
13564 ENDPROC(camellia_cbc_dec_16way)
13565
13566@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13567 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13568 %xmm8, %rsi);
13569
13570+ pax_force_retaddr
13571 ret;
13572 ENDPROC(camellia_ctr_16way)
13573
13574@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13575 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13576 %xmm8, %rsi);
13577
13578+ pax_force_retaddr
13579 ret;
13580 ENDPROC(camellia_xts_crypt_16way)
13581
13582diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13583index 0e0b886..5a3123c 100644
13584--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13585+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13586@@ -11,6 +11,7 @@
13587 */
13588
13589 #include <linux/linkage.h>
13590+#include <asm/alternative-asm.h>
13591
13592 #define CAMELLIA_TABLE_BYTE_LEN 272
13593
13594@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13595 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13596 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13597 %rcx, (%r9));
13598+ pax_force_retaddr
13599 ret;
13600 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13601
13602@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13603 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13604 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13605 %rax, (%r9));
13606+ pax_force_retaddr
13607 ret;
13608 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13609
13610@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13611 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13612 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13613
13614+ pax_force_retaddr
13615 ret;
13616
13617 .align 8
13618@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13619 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13620 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13621
13622+ pax_force_retaddr
13623 ret;
13624
13625 .align 8
13626@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13627
13628 vzeroupper;
13629
13630+ pax_force_retaddr
13631 ret;
13632 ENDPROC(camellia_ecb_enc_32way)
13633
13634@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13635
13636 vzeroupper;
13637
13638+ pax_force_retaddr
13639 ret;
13640 ENDPROC(camellia_ecb_dec_32way)
13641
13642@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13643
13644 vzeroupper;
13645
13646+ pax_force_retaddr
13647 ret;
13648 ENDPROC(camellia_cbc_dec_32way)
13649
13650@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13651
13652 vzeroupper;
13653
13654+ pax_force_retaddr
13655 ret;
13656 ENDPROC(camellia_ctr_32way)
13657
13658@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13659
13660 vzeroupper;
13661
13662+ pax_force_retaddr
13663 ret;
13664 ENDPROC(camellia_xts_crypt_32way)
13665
13666diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13667index 310319c..db3d7b5 100644
13668--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13669+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13670@@ -21,6 +21,7 @@
13671 */
13672
13673 #include <linux/linkage.h>
13674+#include <asm/alternative-asm.h>
13675
13676 .file "camellia-x86_64-asm_64.S"
13677 .text
13678@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13679 enc_outunpack(mov, RT1);
13680
13681 movq RRBP, %rbp;
13682+ pax_force_retaddr
13683 ret;
13684
13685 .L__enc_xor:
13686 enc_outunpack(xor, RT1);
13687
13688 movq RRBP, %rbp;
13689+ pax_force_retaddr
13690 ret;
13691 ENDPROC(__camellia_enc_blk)
13692
13693@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13694 dec_outunpack();
13695
13696 movq RRBP, %rbp;
13697+ pax_force_retaddr
13698 ret;
13699 ENDPROC(camellia_dec_blk)
13700
13701@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13702
13703 movq RRBP, %rbp;
13704 popq %rbx;
13705+ pax_force_retaddr
13706 ret;
13707
13708 .L__enc2_xor:
13709@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13710
13711 movq RRBP, %rbp;
13712 popq %rbx;
13713+ pax_force_retaddr
13714 ret;
13715 ENDPROC(__camellia_enc_blk_2way)
13716
13717@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13718
13719 movq RRBP, %rbp;
13720 movq RXOR, %rbx;
13721+ pax_force_retaddr
13722 ret;
13723 ENDPROC(camellia_dec_blk_2way)
13724diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13725index c35fd5d..2d8c7db 100644
13726--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13727+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13728@@ -24,6 +24,7 @@
13729 */
13730
13731 #include <linux/linkage.h>
13732+#include <asm/alternative-asm.h>
13733
13734 .file "cast5-avx-x86_64-asm_64.S"
13735
13736@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13737 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13738 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13739
13740+ pax_force_retaddr
13741 ret;
13742 ENDPROC(__cast5_enc_blk16)
13743
13744@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13745 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13746 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13747
13748+ pax_force_retaddr
13749 ret;
13750
13751 .L__skip_dec:
13752@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13753 vmovdqu RR4, (6*4*4)(%r11);
13754 vmovdqu RL4, (7*4*4)(%r11);
13755
13756+ pax_force_retaddr
13757 ret;
13758 ENDPROC(cast5_ecb_enc_16way)
13759
13760@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13761 vmovdqu RR4, (6*4*4)(%r11);
13762 vmovdqu RL4, (7*4*4)(%r11);
13763
13764+ pax_force_retaddr
13765 ret;
13766 ENDPROC(cast5_ecb_dec_16way)
13767
13768@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13769 * %rdx: src
13770 */
13771
13772- pushq %r12;
13773+ pushq %r14;
13774
13775 movq %rsi, %r11;
13776- movq %rdx, %r12;
13777+ movq %rdx, %r14;
13778
13779 vmovdqu (0*16)(%rdx), RL1;
13780 vmovdqu (1*16)(%rdx), RR1;
13781@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13782 call __cast5_dec_blk16;
13783
13784 /* xor with src */
13785- vmovq (%r12), RX;
13786+ vmovq (%r14), RX;
13787 vpshufd $0x4f, RX, RX;
13788 vpxor RX, RR1, RR1;
13789- vpxor 0*16+8(%r12), RL1, RL1;
13790- vpxor 1*16+8(%r12), RR2, RR2;
13791- vpxor 2*16+8(%r12), RL2, RL2;
13792- vpxor 3*16+8(%r12), RR3, RR3;
13793- vpxor 4*16+8(%r12), RL3, RL3;
13794- vpxor 5*16+8(%r12), RR4, RR4;
13795- vpxor 6*16+8(%r12), RL4, RL4;
13796+ vpxor 0*16+8(%r14), RL1, RL1;
13797+ vpxor 1*16+8(%r14), RR2, RR2;
13798+ vpxor 2*16+8(%r14), RL2, RL2;
13799+ vpxor 3*16+8(%r14), RR3, RR3;
13800+ vpxor 4*16+8(%r14), RL3, RL3;
13801+ vpxor 5*16+8(%r14), RR4, RR4;
13802+ vpxor 6*16+8(%r14), RL4, RL4;
13803
13804 vmovdqu RR1, (0*16)(%r11);
13805 vmovdqu RL1, (1*16)(%r11);
13806@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13807 vmovdqu RR4, (6*16)(%r11);
13808 vmovdqu RL4, (7*16)(%r11);
13809
13810- popq %r12;
13811+ popq %r14;
13812
13813+ pax_force_retaddr
13814 ret;
13815 ENDPROC(cast5_cbc_dec_16way)
13816
13817@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13818 * %rcx: iv (big endian, 64bit)
13819 */
13820
13821- pushq %r12;
13822+ pushq %r14;
13823
13824 movq %rsi, %r11;
13825- movq %rdx, %r12;
13826+ movq %rdx, %r14;
13827
13828 vpcmpeqd RTMP, RTMP, RTMP;
13829 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13830@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13831 call __cast5_enc_blk16;
13832
13833 /* dst = src ^ iv */
13834- vpxor (0*16)(%r12), RR1, RR1;
13835- vpxor (1*16)(%r12), RL1, RL1;
13836- vpxor (2*16)(%r12), RR2, RR2;
13837- vpxor (3*16)(%r12), RL2, RL2;
13838- vpxor (4*16)(%r12), RR3, RR3;
13839- vpxor (5*16)(%r12), RL3, RL3;
13840- vpxor (6*16)(%r12), RR4, RR4;
13841- vpxor (7*16)(%r12), RL4, RL4;
13842+ vpxor (0*16)(%r14), RR1, RR1;
13843+ vpxor (1*16)(%r14), RL1, RL1;
13844+ vpxor (2*16)(%r14), RR2, RR2;
13845+ vpxor (3*16)(%r14), RL2, RL2;
13846+ vpxor (4*16)(%r14), RR3, RR3;
13847+ vpxor (5*16)(%r14), RL3, RL3;
13848+ vpxor (6*16)(%r14), RR4, RR4;
13849+ vpxor (7*16)(%r14), RL4, RL4;
13850 vmovdqu RR1, (0*16)(%r11);
13851 vmovdqu RL1, (1*16)(%r11);
13852 vmovdqu RR2, (2*16)(%r11);
13853@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13854 vmovdqu RR4, (6*16)(%r11);
13855 vmovdqu RL4, (7*16)(%r11);
13856
13857- popq %r12;
13858+ popq %r14;
13859
13860+ pax_force_retaddr
13861 ret;
13862 ENDPROC(cast5_ctr_16way)
13863diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13864index e3531f8..e123f35 100644
13865--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13866+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13867@@ -24,6 +24,7 @@
13868 */
13869
13870 #include <linux/linkage.h>
13871+#include <asm/alternative-asm.h>
13872 #include "glue_helper-asm-avx.S"
13873
13874 .file "cast6-avx-x86_64-asm_64.S"
13875@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13876 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13877 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13878
13879+ pax_force_retaddr
13880 ret;
13881 ENDPROC(__cast6_enc_blk8)
13882
13883@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13884 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13885 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13886
13887+ pax_force_retaddr
13888 ret;
13889 ENDPROC(__cast6_dec_blk8)
13890
13891@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13892
13893 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13894
13895+ pax_force_retaddr
13896 ret;
13897 ENDPROC(cast6_ecb_enc_8way)
13898
13899@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13900
13901 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13902
13903+ pax_force_retaddr
13904 ret;
13905 ENDPROC(cast6_ecb_dec_8way)
13906
13907@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13908 * %rdx: src
13909 */
13910
13911- pushq %r12;
13912+ pushq %r14;
13913
13914 movq %rsi, %r11;
13915- movq %rdx, %r12;
13916+ movq %rdx, %r14;
13917
13918 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13919
13920 call __cast6_dec_blk8;
13921
13922- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13923+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13924
13925- popq %r12;
13926+ popq %r14;
13927
13928+ pax_force_retaddr
13929 ret;
13930 ENDPROC(cast6_cbc_dec_8way)
13931
13932@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13933 * %rcx: iv (little endian, 128bit)
13934 */
13935
13936- pushq %r12;
13937+ pushq %r14;
13938
13939 movq %rsi, %r11;
13940- movq %rdx, %r12;
13941+ movq %rdx, %r14;
13942
13943 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13944 RD2, RX, RKR, RKM);
13945
13946 call __cast6_enc_blk8;
13947
13948- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13949+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13950
13951- popq %r12;
13952+ popq %r14;
13953
13954+ pax_force_retaddr
13955 ret;
13956 ENDPROC(cast6_ctr_8way)
13957
13958@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13959 /* dst <= regs xor IVs(in dst) */
13960 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13961
13962+ pax_force_retaddr
13963 ret;
13964 ENDPROC(cast6_xts_enc_8way)
13965
13966@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13967 /* dst <= regs xor IVs(in dst) */
13968 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13969
13970+ pax_force_retaddr
13971 ret;
13972 ENDPROC(cast6_xts_dec_8way)
13973diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13974index 26d49eb..8bf39c8 100644
13975--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13976+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13977@@ -45,6 +45,7 @@
13978
13979 #include <asm/inst.h>
13980 #include <linux/linkage.h>
13981+#include <asm/alternative-asm.h>
13982
13983 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13984
13985@@ -309,6 +310,7 @@ do_return:
13986 popq %rsi
13987 popq %rdi
13988 popq %rbx
13989+ pax_force_retaddr
13990 ret
13991
13992 ################################################################
13993@@ -330,7 +332,7 @@ ENDPROC(crc_pcl)
13994 ## PCLMULQDQ tables
13995 ## Table is 128 entries x 2 words (8 bytes) each
13996 ################################################################
13997-.section .rotata, "a", %progbits
13998+.section .rodata, "a", %progbits
13999 .align 8
14000 K_table:
14001 .long 0x493c7d27, 0x00000001
14002diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14003index 5d1e007..098cb4f 100644
14004--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14005+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14006@@ -18,6 +18,7 @@
14007
14008 #include <linux/linkage.h>
14009 #include <asm/inst.h>
14010+#include <asm/alternative-asm.h>
14011
14012 .data
14013
14014@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14015 psrlq $1, T2
14016 pxor T2, T1
14017 pxor T1, DATA
14018+ pax_force_retaddr
14019 ret
14020 ENDPROC(__clmul_gf128mul_ble)
14021
14022@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14023 call __clmul_gf128mul_ble
14024 PSHUFB_XMM BSWAP DATA
14025 movups DATA, (%rdi)
14026+ pax_force_retaddr
14027 ret
14028 ENDPROC(clmul_ghash_mul)
14029
14030@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14031 PSHUFB_XMM BSWAP DATA
14032 movups DATA, (%rdi)
14033 .Lupdate_just_ret:
14034+ pax_force_retaddr
14035 ret
14036 ENDPROC(clmul_ghash_update)
14037diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14038index 9279e0b..c4b3d2c 100644
14039--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14040+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14041@@ -1,4 +1,5 @@
14042 #include <linux/linkage.h>
14043+#include <asm/alternative-asm.h>
14044
14045 # enter salsa20_encrypt_bytes
14046 ENTRY(salsa20_encrypt_bytes)
14047@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14048 add %r11,%rsp
14049 mov %rdi,%rax
14050 mov %rsi,%rdx
14051+ pax_force_retaddr
14052 ret
14053 # bytesatleast65:
14054 ._bytesatleast65:
14055@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14056 add %r11,%rsp
14057 mov %rdi,%rax
14058 mov %rsi,%rdx
14059+ pax_force_retaddr
14060 ret
14061 ENDPROC(salsa20_keysetup)
14062
14063@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14064 add %r11,%rsp
14065 mov %rdi,%rax
14066 mov %rsi,%rdx
14067+ pax_force_retaddr
14068 ret
14069 ENDPROC(salsa20_ivsetup)
14070diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14071index 2f202f4..d9164d6 100644
14072--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14073+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14074@@ -24,6 +24,7 @@
14075 */
14076
14077 #include <linux/linkage.h>
14078+#include <asm/alternative-asm.h>
14079 #include "glue_helper-asm-avx.S"
14080
14081 .file "serpent-avx-x86_64-asm_64.S"
14082@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14083 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14084 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14085
14086+ pax_force_retaddr
14087 ret;
14088 ENDPROC(__serpent_enc_blk8_avx)
14089
14090@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14091 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14092 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14093
14094+ pax_force_retaddr
14095 ret;
14096 ENDPROC(__serpent_dec_blk8_avx)
14097
14098@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14099
14100 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14101
14102+ pax_force_retaddr
14103 ret;
14104 ENDPROC(serpent_ecb_enc_8way_avx)
14105
14106@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14107
14108 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14109
14110+ pax_force_retaddr
14111 ret;
14112 ENDPROC(serpent_ecb_dec_8way_avx)
14113
14114@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14115
14116 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14117
14118+ pax_force_retaddr
14119 ret;
14120 ENDPROC(serpent_cbc_dec_8way_avx)
14121
14122@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14123
14124 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14125
14126+ pax_force_retaddr
14127 ret;
14128 ENDPROC(serpent_ctr_8way_avx)
14129
14130@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14131 /* dst <= regs xor IVs(in dst) */
14132 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14133
14134+ pax_force_retaddr
14135 ret;
14136 ENDPROC(serpent_xts_enc_8way_avx)
14137
14138@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14139 /* dst <= regs xor IVs(in dst) */
14140 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14141
14142+ pax_force_retaddr
14143 ret;
14144 ENDPROC(serpent_xts_dec_8way_avx)
14145diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14146index b222085..abd483c 100644
14147--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14148+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14149@@ -15,6 +15,7 @@
14150 */
14151
14152 #include <linux/linkage.h>
14153+#include <asm/alternative-asm.h>
14154 #include "glue_helper-asm-avx2.S"
14155
14156 .file "serpent-avx2-asm_64.S"
14157@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14158 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14159 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14160
14161+ pax_force_retaddr
14162 ret;
14163 ENDPROC(__serpent_enc_blk16)
14164
14165@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14166 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14167 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14168
14169+ pax_force_retaddr
14170 ret;
14171 ENDPROC(__serpent_dec_blk16)
14172
14173@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14174
14175 vzeroupper;
14176
14177+ pax_force_retaddr
14178 ret;
14179 ENDPROC(serpent_ecb_enc_16way)
14180
14181@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14182
14183 vzeroupper;
14184
14185+ pax_force_retaddr
14186 ret;
14187 ENDPROC(serpent_ecb_dec_16way)
14188
14189@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14190
14191 vzeroupper;
14192
14193+ pax_force_retaddr
14194 ret;
14195 ENDPROC(serpent_cbc_dec_16way)
14196
14197@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14198
14199 vzeroupper;
14200
14201+ pax_force_retaddr
14202 ret;
14203 ENDPROC(serpent_ctr_16way)
14204
14205@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14206
14207 vzeroupper;
14208
14209+ pax_force_retaddr
14210 ret;
14211 ENDPROC(serpent_xts_enc_16way)
14212
14213@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14214
14215 vzeroupper;
14216
14217+ pax_force_retaddr
14218 ret;
14219 ENDPROC(serpent_xts_dec_16way)
14220diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14221index acc066c..1559cc4 100644
14222--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14223+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14224@@ -25,6 +25,7 @@
14225 */
14226
14227 #include <linux/linkage.h>
14228+#include <asm/alternative-asm.h>
14229
14230 .file "serpent-sse2-x86_64-asm_64.S"
14231 .text
14232@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14233 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14234 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14235
14236+ pax_force_retaddr
14237 ret;
14238
14239 .L__enc_xor8:
14240 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14241 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14242
14243+ pax_force_retaddr
14244 ret;
14245 ENDPROC(__serpent_enc_blk_8way)
14246
14247@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14248 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14249 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14250
14251+ pax_force_retaddr
14252 ret;
14253 ENDPROC(serpent_dec_blk_8way)
14254diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14255index a410950..9dfe7ad 100644
14256--- a/arch/x86/crypto/sha1_ssse3_asm.S
14257+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14258@@ -29,6 +29,7 @@
14259 */
14260
14261 #include <linux/linkage.h>
14262+#include <asm/alternative-asm.h>
14263
14264 #define CTX %rdi // arg1
14265 #define BUF %rsi // arg2
14266@@ -75,9 +76,9 @@
14267
14268 push %rbx
14269 push %rbp
14270- push %r12
14271+ push %r14
14272
14273- mov %rsp, %r12
14274+ mov %rsp, %r14
14275 sub $64, %rsp # allocate workspace
14276 and $~15, %rsp # align stack
14277
14278@@ -99,11 +100,12 @@
14279 xor %rax, %rax
14280 rep stosq
14281
14282- mov %r12, %rsp # deallocate workspace
14283+ mov %r14, %rsp # deallocate workspace
14284
14285- pop %r12
14286+ pop %r14
14287 pop %rbp
14288 pop %rbx
14289+ pax_force_retaddr
14290 ret
14291
14292 ENDPROC(\name)
14293diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14294index 642f156..51a513c 100644
14295--- a/arch/x86/crypto/sha256-avx-asm.S
14296+++ b/arch/x86/crypto/sha256-avx-asm.S
14297@@ -49,6 +49,7 @@
14298
14299 #ifdef CONFIG_AS_AVX
14300 #include <linux/linkage.h>
14301+#include <asm/alternative-asm.h>
14302
14303 ## assume buffers not aligned
14304 #define VMOVDQ vmovdqu
14305@@ -460,6 +461,7 @@ done_hash:
14306 popq %r13
14307 popq %rbp
14308 popq %rbx
14309+ pax_force_retaddr
14310 ret
14311 ENDPROC(sha256_transform_avx)
14312
14313diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14314index 9e86944..3795e6a 100644
14315--- a/arch/x86/crypto/sha256-avx2-asm.S
14316+++ b/arch/x86/crypto/sha256-avx2-asm.S
14317@@ -50,6 +50,7 @@
14318
14319 #ifdef CONFIG_AS_AVX2
14320 #include <linux/linkage.h>
14321+#include <asm/alternative-asm.h>
14322
14323 ## assume buffers not aligned
14324 #define VMOVDQ vmovdqu
14325@@ -720,6 +721,7 @@ done_hash:
14326 popq %r12
14327 popq %rbp
14328 popq %rbx
14329+ pax_force_retaddr
14330 ret
14331 ENDPROC(sha256_transform_rorx)
14332
14333diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14334index f833b74..8c62a9e 100644
14335--- a/arch/x86/crypto/sha256-ssse3-asm.S
14336+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14337@@ -47,6 +47,7 @@
14338 ########################################################################
14339
14340 #include <linux/linkage.h>
14341+#include <asm/alternative-asm.h>
14342
14343 ## assume buffers not aligned
14344 #define MOVDQ movdqu
14345@@ -471,6 +472,7 @@ done_hash:
14346 popq %rbp
14347 popq %rbx
14348
14349+ pax_force_retaddr
14350 ret
14351 ENDPROC(sha256_transform_ssse3)
14352
14353diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14354index 974dde9..a823ff9 100644
14355--- a/arch/x86/crypto/sha512-avx-asm.S
14356+++ b/arch/x86/crypto/sha512-avx-asm.S
14357@@ -49,6 +49,7 @@
14358
14359 #ifdef CONFIG_AS_AVX
14360 #include <linux/linkage.h>
14361+#include <asm/alternative-asm.h>
14362
14363 .text
14364
14365@@ -364,6 +365,7 @@ updateblock:
14366 mov frame_RSPSAVE(%rsp), %rsp
14367
14368 nowork:
14369+ pax_force_retaddr
14370 ret
14371 ENDPROC(sha512_transform_avx)
14372
14373diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14374index 568b961..ed20c37 100644
14375--- a/arch/x86/crypto/sha512-avx2-asm.S
14376+++ b/arch/x86/crypto/sha512-avx2-asm.S
14377@@ -51,6 +51,7 @@
14378
14379 #ifdef CONFIG_AS_AVX2
14380 #include <linux/linkage.h>
14381+#include <asm/alternative-asm.h>
14382
14383 .text
14384
14385@@ -678,6 +679,7 @@ done_hash:
14386
14387 # Restore Stack Pointer
14388 mov frame_RSPSAVE(%rsp), %rsp
14389+ pax_force_retaddr
14390 ret
14391 ENDPROC(sha512_transform_rorx)
14392
14393diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14394index fb56855..6edd768 100644
14395--- a/arch/x86/crypto/sha512-ssse3-asm.S
14396+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14397@@ -48,6 +48,7 @@
14398 ########################################################################
14399
14400 #include <linux/linkage.h>
14401+#include <asm/alternative-asm.h>
14402
14403 .text
14404
14405@@ -363,6 +364,7 @@ updateblock:
14406 mov frame_RSPSAVE(%rsp), %rsp
14407
14408 nowork:
14409+ pax_force_retaddr
14410 ret
14411 ENDPROC(sha512_transform_ssse3)
14412
14413diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14414index 0505813..b067311 100644
14415--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14416+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14417@@ -24,6 +24,7 @@
14418 */
14419
14420 #include <linux/linkage.h>
14421+#include <asm/alternative-asm.h>
14422 #include "glue_helper-asm-avx.S"
14423
14424 .file "twofish-avx-x86_64-asm_64.S"
14425@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14426 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14427 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14428
14429+ pax_force_retaddr
14430 ret;
14431 ENDPROC(__twofish_enc_blk8)
14432
14433@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14434 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14435 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14436
14437+ pax_force_retaddr
14438 ret;
14439 ENDPROC(__twofish_dec_blk8)
14440
14441@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14442
14443 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14444
14445+ pax_force_retaddr
14446 ret;
14447 ENDPROC(twofish_ecb_enc_8way)
14448
14449@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14450
14451 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14452
14453+ pax_force_retaddr
14454 ret;
14455 ENDPROC(twofish_ecb_dec_8way)
14456
14457@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14458 * %rdx: src
14459 */
14460
14461- pushq %r12;
14462+ pushq %r14;
14463
14464 movq %rsi, %r11;
14465- movq %rdx, %r12;
14466+ movq %rdx, %r14;
14467
14468 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14469
14470 call __twofish_dec_blk8;
14471
14472- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14473+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14474
14475- popq %r12;
14476+ popq %r14;
14477
14478+ pax_force_retaddr
14479 ret;
14480 ENDPROC(twofish_cbc_dec_8way)
14481
14482@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14483 * %rcx: iv (little endian, 128bit)
14484 */
14485
14486- pushq %r12;
14487+ pushq %r14;
14488
14489 movq %rsi, %r11;
14490- movq %rdx, %r12;
14491+ movq %rdx, %r14;
14492
14493 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14494 RD2, RX0, RX1, RY0);
14495
14496 call __twofish_enc_blk8;
14497
14498- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14499+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14500
14501- popq %r12;
14502+ popq %r14;
14503
14504+ pax_force_retaddr
14505 ret;
14506 ENDPROC(twofish_ctr_8way)
14507
14508@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14509 /* dst <= regs xor IVs(in dst) */
14510 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14511
14512+ pax_force_retaddr
14513 ret;
14514 ENDPROC(twofish_xts_enc_8way)
14515
14516@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14517 /* dst <= regs xor IVs(in dst) */
14518 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14519
14520+ pax_force_retaddr
14521 ret;
14522 ENDPROC(twofish_xts_dec_8way)
14523diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14524index 1c3b7ce..02f578d 100644
14525--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14526+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14527@@ -21,6 +21,7 @@
14528 */
14529
14530 #include <linux/linkage.h>
14531+#include <asm/alternative-asm.h>
14532
14533 .file "twofish-x86_64-asm-3way.S"
14534 .text
14535@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14536 popq %r13;
14537 popq %r14;
14538 popq %r15;
14539+ pax_force_retaddr
14540 ret;
14541
14542 .L__enc_xor3:
14543@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14544 popq %r13;
14545 popq %r14;
14546 popq %r15;
14547+ pax_force_retaddr
14548 ret;
14549 ENDPROC(__twofish_enc_blk_3way)
14550
14551@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14552 popq %r13;
14553 popq %r14;
14554 popq %r15;
14555+ pax_force_retaddr
14556 ret;
14557 ENDPROC(twofish_dec_blk_3way)
14558diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14559index a039d21..524b8b2 100644
14560--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14561+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14562@@ -22,6 +22,7 @@
14563
14564 #include <linux/linkage.h>
14565 #include <asm/asm-offsets.h>
14566+#include <asm/alternative-asm.h>
14567
14568 #define a_offset 0
14569 #define b_offset 4
14570@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14571
14572 popq R1
14573 movq $1,%rax
14574+ pax_force_retaddr
14575 ret
14576 ENDPROC(twofish_enc_blk)
14577
14578@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14579
14580 popq R1
14581 movq $1,%rax
14582+ pax_force_retaddr
14583 ret
14584 ENDPROC(twofish_dec_blk)
14585diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14586index ae6aad1..719d6d9 100644
14587--- a/arch/x86/ia32/ia32_aout.c
14588+++ b/arch/x86/ia32/ia32_aout.c
14589@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14590 unsigned long dump_start, dump_size;
14591 struct user32 dump;
14592
14593+ memset(&dump, 0, sizeof(dump));
14594+
14595 fs = get_fs();
14596 set_fs(KERNEL_DS);
14597 has_dumped = 1;
14598diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14599index d0165c9..0d5639b 100644
14600--- a/arch/x86/ia32/ia32_signal.c
14601+++ b/arch/x86/ia32/ia32_signal.c
14602@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14603 if (__get_user(set.sig[0], &frame->sc.oldmask)
14604 || (_COMPAT_NSIG_WORDS > 1
14605 && __copy_from_user((((char *) &set.sig) + 4),
14606- &frame->extramask,
14607+ frame->extramask,
14608 sizeof(frame->extramask))))
14609 goto badframe;
14610
14611@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14612 sp -= frame_size;
14613 /* Align the stack pointer according to the i386 ABI,
14614 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14615- sp = ((sp + 4) & -16ul) - 4;
14616+ sp = ((sp - 12) & -16ul) - 4;
14617 return (void __user *) sp;
14618 }
14619
14620@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14621 } else {
14622 /* Return stub is in 32bit vsyscall page */
14623 if (current->mm->context.vdso)
14624- restorer = current->mm->context.vdso +
14625- selected_vdso32->sym___kernel_sigreturn;
14626+ restorer = (void __force_user *)(current->mm->context.vdso +
14627+ selected_vdso32->sym___kernel_sigreturn);
14628 else
14629- restorer = &frame->retcode;
14630+ restorer = frame->retcode;
14631 }
14632
14633 put_user_try {
14634@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14635 * These are actually not used anymore, but left because some
14636 * gdb versions depend on them as a marker.
14637 */
14638- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14639+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14640 } put_user_catch(err);
14641
14642 if (err)
14643@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14644 0xb8,
14645 __NR_ia32_rt_sigreturn,
14646 0x80cd,
14647- 0,
14648+ 0
14649 };
14650
14651 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14652@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14653
14654 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14655 restorer = ksig->ka.sa.sa_restorer;
14656+ else if (current->mm->context.vdso)
14657+ /* Return stub is in 32bit vsyscall page */
14658+ restorer = (void __force_user *)(current->mm->context.vdso +
14659+ selected_vdso32->sym___kernel_rt_sigreturn);
14660 else
14661- restorer = current->mm->context.vdso +
14662- selected_vdso32->sym___kernel_rt_sigreturn;
14663+ restorer = frame->retcode;
14664 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14665
14666 /*
14667 * Not actually used anymore, but left because some gdb
14668 * versions need it.
14669 */
14670- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14671+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14672 } put_user_catch(err);
14673
14674 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14675diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14676index 156ebca..9591cf0 100644
14677--- a/arch/x86/ia32/ia32entry.S
14678+++ b/arch/x86/ia32/ia32entry.S
14679@@ -15,8 +15,10 @@
14680 #include <asm/irqflags.h>
14681 #include <asm/asm.h>
14682 #include <asm/smap.h>
14683+#include <asm/pgtable.h>
14684 #include <linux/linkage.h>
14685 #include <linux/err.h>
14686+#include <asm/alternative-asm.h>
14687
14688 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14689 #include <linux/elf-em.h>
14690@@ -62,12 +64,12 @@
14691 */
14692 .macro LOAD_ARGS32 offset, _r9=0
14693 .if \_r9
14694- movl \offset+16(%rsp),%r9d
14695+ movl \offset+R9(%rsp),%r9d
14696 .endif
14697- movl \offset+40(%rsp),%ecx
14698- movl \offset+48(%rsp),%edx
14699- movl \offset+56(%rsp),%esi
14700- movl \offset+64(%rsp),%edi
14701+ movl \offset+RCX(%rsp),%ecx
14702+ movl \offset+RDX(%rsp),%edx
14703+ movl \offset+RSI(%rsp),%esi
14704+ movl \offset+RDI(%rsp),%edi
14705 movl %eax,%eax /* zero extension */
14706 .endm
14707
14708@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14709 ENDPROC(native_irq_enable_sysexit)
14710 #endif
14711
14712+ .macro pax_enter_kernel_user
14713+ pax_set_fptr_mask
14714+#ifdef CONFIG_PAX_MEMORY_UDEREF
14715+ call pax_enter_kernel_user
14716+#endif
14717+ .endm
14718+
14719+ .macro pax_exit_kernel_user
14720+#ifdef CONFIG_PAX_MEMORY_UDEREF
14721+ call pax_exit_kernel_user
14722+#endif
14723+#ifdef CONFIG_PAX_RANDKSTACK
14724+ pushq %rax
14725+ pushq %r11
14726+ call pax_randomize_kstack
14727+ popq %r11
14728+ popq %rax
14729+#endif
14730+ .endm
14731+
14732+ .macro pax_erase_kstack
14733+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14734+ call pax_erase_kstack
14735+#endif
14736+ .endm
14737+
14738 /*
14739 * 32bit SYSENTER instruction entry.
14740 *
14741@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14742 CFI_REGISTER rsp,rbp
14743 SWAPGS_UNSAFE_STACK
14744 movq PER_CPU_VAR(kernel_stack), %rsp
14745- addq $(KERNEL_STACK_OFFSET),%rsp
14746- /*
14747- * No need to follow this irqs on/off section: the syscall
14748- * disabled irqs, here we enable it straight after entry:
14749- */
14750- ENABLE_INTERRUPTS(CLBR_NONE)
14751 movl %ebp,%ebp /* zero extension */
14752 pushq_cfi $__USER32_DS
14753 /*CFI_REL_OFFSET ss,0*/
14754@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14755 CFI_REL_OFFSET rsp,0
14756 pushfq_cfi
14757 /*CFI_REL_OFFSET rflags,0*/
14758- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14759- CFI_REGISTER rip,r10
14760+ orl $X86_EFLAGS_IF,(%rsp)
14761+ GET_THREAD_INFO(%r11)
14762+ movl TI_sysenter_return(%r11), %r11d
14763+ CFI_REGISTER rip,r11
14764 pushq_cfi $__USER32_CS
14765 /*CFI_REL_OFFSET cs,0*/
14766 movl %eax, %eax
14767- pushq_cfi %r10
14768+ pushq_cfi %r11
14769 CFI_REL_OFFSET rip,0
14770 pushq_cfi %rax
14771 cld
14772 SAVE_ARGS 0,1,0
14773+ pax_enter_kernel_user
14774+
14775+#ifdef CONFIG_PAX_RANDKSTACK
14776+ pax_erase_kstack
14777+#endif
14778+
14779+ /*
14780+ * No need to follow this irqs on/off section: the syscall
14781+ * disabled irqs, here we enable it straight after entry:
14782+ */
14783+ ENABLE_INTERRUPTS(CLBR_NONE)
14784 /* no need to do an access_ok check here because rbp has been
14785 32bit zero extended */
14786+
14787+#ifdef CONFIG_PAX_MEMORY_UDEREF
14788+ addq pax_user_shadow_base,%rbp
14789+ ASM_PAX_OPEN_USERLAND
14790+#endif
14791+
14792 ASM_STAC
14793 1: movl (%rbp),%ebp
14794 _ASM_EXTABLE(1b,ia32_badarg)
14795 ASM_CLAC
14796
14797+#ifdef CONFIG_PAX_MEMORY_UDEREF
14798+ ASM_PAX_CLOSE_USERLAND
14799+#endif
14800+
14801 /*
14802 * Sysenter doesn't filter flags, so we need to clear NT
14803 * ourselves. To save a few cycles, we can check whether
14804@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14805 jnz sysenter_fix_flags
14806 sysenter_flags_fixed:
14807
14808- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14809- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14810+ GET_THREAD_INFO(%r11)
14811+ orl $TS_COMPAT,TI_status(%r11)
14812+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14813 CFI_REMEMBER_STATE
14814 jnz sysenter_tracesys
14815 cmpq $(IA32_NR_syscalls-1),%rax
14816@@ -172,14 +218,17 @@ sysenter_do_call:
14817 sysenter_dispatch:
14818 call *ia32_sys_call_table(,%rax,8)
14819 movq %rax,RAX-ARGOFFSET(%rsp)
14820+ GET_THREAD_INFO(%r11)
14821 DISABLE_INTERRUPTS(CLBR_NONE)
14822 TRACE_IRQS_OFF
14823- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14824+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14825 jnz sysexit_audit
14826 sysexit_from_sys_call:
14827- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14828+ pax_exit_kernel_user
14829+ pax_erase_kstack
14830+ andl $~TS_COMPAT,TI_status(%r11)
14831 /* clear IF, that popfq doesn't enable interrupts early */
14832- andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
14833+ andl $~X86_EFLAGS_IF,EFLAGS-ARGOFFSET(%rsp)
14834 movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
14835 CFI_REGISTER rip,rdx
14836 RESTORE_ARGS 0,24,0,0,0,0
14837@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14838 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14839 movl %eax,%edi /* 1st arg: syscall number */
14840 call __audit_syscall_entry
14841+
14842+ pax_erase_kstack
14843+
14844 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14845 cmpq $(IA32_NR_syscalls-1),%rax
14846 ja ia32_badsys
14847@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14848 .endm
14849
14850 .macro auditsys_exit exit
14851- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14852+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14853 jnz ia32_ret_from_sys_call
14854 TRACE_IRQS_ON
14855 ENABLE_INTERRUPTS(CLBR_NONE)
14856@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14857 1: setbe %al /* 1 if error, 0 if not */
14858 movzbl %al,%edi /* zero-extend that into %edi */
14859 call __audit_syscall_exit
14860+ GET_THREAD_INFO(%r11)
14861 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14862 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14863 DISABLE_INTERRUPTS(CLBR_NONE)
14864 TRACE_IRQS_OFF
14865- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14866+ testl %edi,TI_flags(%r11)
14867 jz \exit
14868 CLEAR_RREGS -ARGOFFSET
14869 jmp int_with_check
14870@@ -253,7 +306,7 @@ sysenter_fix_flags:
14871
14872 sysenter_tracesys:
14873 #ifdef CONFIG_AUDITSYSCALL
14874- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14875+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14876 jz sysenter_auditsys
14877 #endif
14878 SAVE_REST
14879@@ -265,6 +318,9 @@ sysenter_tracesys:
14880 RESTORE_REST
14881 cmpq $(IA32_NR_syscalls-1),%rax
14882 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14883+
14884+ pax_erase_kstack
14885+
14886 jmp sysenter_do_call
14887 CFI_ENDPROC
14888 ENDPROC(ia32_sysenter_target)
14889@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14890 ENTRY(ia32_cstar_target)
14891 CFI_STARTPROC32 simple
14892 CFI_SIGNAL_FRAME
14893- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14894+ CFI_DEF_CFA rsp,0
14895 CFI_REGISTER rip,rcx
14896 /*CFI_REGISTER rflags,r11*/
14897 SWAPGS_UNSAFE_STACK
14898 movl %esp,%r8d
14899 CFI_REGISTER rsp,r8
14900 movq PER_CPU_VAR(kernel_stack),%rsp
14901+ SAVE_ARGS 8*6,0,0
14902+ pax_enter_kernel_user
14903+
14904+#ifdef CONFIG_PAX_RANDKSTACK
14905+ pax_erase_kstack
14906+#endif
14907+
14908 /*
14909 * No need to follow this irqs on/off section: the syscall
14910 * disabled irqs and here we enable it straight after entry:
14911 */
14912 ENABLE_INTERRUPTS(CLBR_NONE)
14913- SAVE_ARGS 8,0,0
14914 movl %eax,%eax /* zero extension */
14915 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14916 movq %rcx,RIP-ARGOFFSET(%rsp)
14917@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14918 /* no need to do an access_ok check here because r8 has been
14919 32bit zero extended */
14920 /* hardware stack frame is complete now */
14921+
14922+#ifdef CONFIG_PAX_MEMORY_UDEREF
14923+ ASM_PAX_OPEN_USERLAND
14924+ movq pax_user_shadow_base,%r8
14925+ addq RSP-ARGOFFSET(%rsp),%r8
14926+#endif
14927+
14928 ASM_STAC
14929 1: movl (%r8),%r9d
14930 _ASM_EXTABLE(1b,ia32_badarg)
14931 ASM_CLAC
14932- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14933- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14934+
14935+#ifdef CONFIG_PAX_MEMORY_UDEREF
14936+ ASM_PAX_CLOSE_USERLAND
14937+#endif
14938+
14939+ GET_THREAD_INFO(%r11)
14940+ orl $TS_COMPAT,TI_status(%r11)
14941+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14942 CFI_REMEMBER_STATE
14943 jnz cstar_tracesys
14944 cmpq $IA32_NR_syscalls-1,%rax
14945@@ -335,13 +410,16 @@ cstar_do_call:
14946 cstar_dispatch:
14947 call *ia32_sys_call_table(,%rax,8)
14948 movq %rax,RAX-ARGOFFSET(%rsp)
14949+ GET_THREAD_INFO(%r11)
14950 DISABLE_INTERRUPTS(CLBR_NONE)
14951 TRACE_IRQS_OFF
14952- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14953+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14954 jnz sysretl_audit
14955 sysretl_from_sys_call:
14956- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14957- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14958+ pax_exit_kernel_user
14959+ pax_erase_kstack
14960+ andl $~TS_COMPAT,TI_status(%r11)
14961+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14962 movl RIP-ARGOFFSET(%rsp),%ecx
14963 CFI_REGISTER rip,rcx
14964 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14965@@ -368,7 +446,7 @@ sysretl_audit:
14966
14967 cstar_tracesys:
14968 #ifdef CONFIG_AUDITSYSCALL
14969- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14970+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14971 jz cstar_auditsys
14972 #endif
14973 xchgl %r9d,%ebp
14974@@ -382,11 +460,19 @@ cstar_tracesys:
14975 xchgl %ebp,%r9d
14976 cmpq $(IA32_NR_syscalls-1),%rax
14977 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14978+
14979+ pax_erase_kstack
14980+
14981 jmp cstar_do_call
14982 END(ia32_cstar_target)
14983
14984 ia32_badarg:
14985 ASM_CLAC
14986+
14987+#ifdef CONFIG_PAX_MEMORY_UDEREF
14988+ ASM_PAX_CLOSE_USERLAND
14989+#endif
14990+
14991 movq $-EFAULT,%rax
14992 jmp ia32_sysret
14993 CFI_ENDPROC
14994@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14995 CFI_REL_OFFSET rip,RIP-RIP
14996 PARAVIRT_ADJUST_EXCEPTION_FRAME
14997 SWAPGS
14998- /*
14999- * No need to follow this irqs on/off section: the syscall
15000- * disabled irqs and here we enable it straight after entry:
15001- */
15002- ENABLE_INTERRUPTS(CLBR_NONE)
15003 movl %eax,%eax
15004 pushq_cfi %rax
15005 cld
15006 /* note the registers are not zero extended to the sf.
15007 this could be a problem. */
15008 SAVE_ARGS 0,1,0
15009- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15010- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15011+ pax_enter_kernel_user
15012+
15013+#ifdef CONFIG_PAX_RANDKSTACK
15014+ pax_erase_kstack
15015+#endif
15016+
15017+ /*
15018+ * No need to follow this irqs on/off section: the syscall
15019+ * disabled irqs and here we enable it straight after entry:
15020+ */
15021+ ENABLE_INTERRUPTS(CLBR_NONE)
15022+ GET_THREAD_INFO(%r11)
15023+ orl $TS_COMPAT,TI_status(%r11)
15024+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15025 jnz ia32_tracesys
15026 cmpq $(IA32_NR_syscalls-1),%rax
15027 ja ia32_badsys
15028@@ -458,6 +551,9 @@ ia32_tracesys:
15029 RESTORE_REST
15030 cmpq $(IA32_NR_syscalls-1),%rax
15031 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15032+
15033+ pax_erase_kstack
15034+
15035 jmp ia32_do_call
15036 END(ia32_syscall)
15037
15038diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15039index 8e0ceec..af13504 100644
15040--- a/arch/x86/ia32/sys_ia32.c
15041+++ b/arch/x86/ia32/sys_ia32.c
15042@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15043 */
15044 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15045 {
15046- typeof(ubuf->st_uid) uid = 0;
15047- typeof(ubuf->st_gid) gid = 0;
15048+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15049+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15050 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15051 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15052 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15053diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15054index 372231c..51b537d 100644
15055--- a/arch/x86/include/asm/alternative-asm.h
15056+++ b/arch/x86/include/asm/alternative-asm.h
15057@@ -18,6 +18,45 @@
15058 .endm
15059 #endif
15060
15061+#ifdef KERNEXEC_PLUGIN
15062+ .macro pax_force_retaddr_bts rip=0
15063+ btsq $63,\rip(%rsp)
15064+ .endm
15065+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15066+ .macro pax_force_retaddr rip=0, reload=0
15067+ btsq $63,\rip(%rsp)
15068+ .endm
15069+ .macro pax_force_fptr ptr
15070+ btsq $63,\ptr
15071+ .endm
15072+ .macro pax_set_fptr_mask
15073+ .endm
15074+#endif
15075+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15076+ .macro pax_force_retaddr rip=0, reload=0
15077+ .if \reload
15078+ pax_set_fptr_mask
15079+ .endif
15080+ orq %r12,\rip(%rsp)
15081+ .endm
15082+ .macro pax_force_fptr ptr
15083+ orq %r12,\ptr
15084+ .endm
15085+ .macro pax_set_fptr_mask
15086+ movabs $0x8000000000000000,%r12
15087+ .endm
15088+#endif
15089+#else
15090+ .macro pax_force_retaddr rip=0, reload=0
15091+ .endm
15092+ .macro pax_force_fptr ptr
15093+ .endm
15094+ .macro pax_force_retaddr_bts rip=0
15095+ .endm
15096+ .macro pax_set_fptr_mask
15097+ .endm
15098+#endif
15099+
15100 .macro altinstruction_entry orig alt feature orig_len alt_len
15101 .long \orig - .
15102 .long \alt - .
15103diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15104index 473bdbe..b1e3377 100644
15105--- a/arch/x86/include/asm/alternative.h
15106+++ b/arch/x86/include/asm/alternative.h
15107@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15108 ".pushsection .discard,\"aw\",@progbits\n" \
15109 DISCARD_ENTRY(1) \
15110 ".popsection\n" \
15111- ".pushsection .altinstr_replacement, \"ax\"\n" \
15112+ ".pushsection .altinstr_replacement, \"a\"\n" \
15113 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15114 ".popsection"
15115
15116@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15117 DISCARD_ENTRY(1) \
15118 DISCARD_ENTRY(2) \
15119 ".popsection\n" \
15120- ".pushsection .altinstr_replacement, \"ax\"\n" \
15121+ ".pushsection .altinstr_replacement, \"a\"\n" \
15122 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15123 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15124 ".popsection"
15125diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15126index efc3b22..85c4f3a 100644
15127--- a/arch/x86/include/asm/apic.h
15128+++ b/arch/x86/include/asm/apic.h
15129@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15130
15131 #ifdef CONFIG_X86_LOCAL_APIC
15132
15133-extern unsigned int apic_verbosity;
15134+extern int apic_verbosity;
15135 extern int local_apic_timer_c2_ok;
15136
15137 extern int disable_apic;
15138diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15139index 20370c6..a2eb9b0 100644
15140--- a/arch/x86/include/asm/apm.h
15141+++ b/arch/x86/include/asm/apm.h
15142@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15143 __asm__ __volatile__(APM_DO_ZERO_SEGS
15144 "pushl %%edi\n\t"
15145 "pushl %%ebp\n\t"
15146- "lcall *%%cs:apm_bios_entry\n\t"
15147+ "lcall *%%ss:apm_bios_entry\n\t"
15148 "setc %%al\n\t"
15149 "popl %%ebp\n\t"
15150 "popl %%edi\n\t"
15151@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15152 __asm__ __volatile__(APM_DO_ZERO_SEGS
15153 "pushl %%edi\n\t"
15154 "pushl %%ebp\n\t"
15155- "lcall *%%cs:apm_bios_entry\n\t"
15156+ "lcall *%%ss:apm_bios_entry\n\t"
15157 "setc %%bl\n\t"
15158 "popl %%ebp\n\t"
15159 "popl %%edi\n\t"
15160diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15161index 5e5cd12..51cdc93 100644
15162--- a/arch/x86/include/asm/atomic.h
15163+++ b/arch/x86/include/asm/atomic.h
15164@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15165 }
15166
15167 /**
15168+ * atomic_read_unchecked - read atomic variable
15169+ * @v: pointer of type atomic_unchecked_t
15170+ *
15171+ * Atomically reads the value of @v.
15172+ */
15173+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15174+{
15175+ return ACCESS_ONCE((v)->counter);
15176+}
15177+
15178+/**
15179 * atomic_set - set atomic variable
15180 * @v: pointer of type atomic_t
15181 * @i: required value
15182@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15183 }
15184
15185 /**
15186+ * atomic_set_unchecked - set atomic variable
15187+ * @v: pointer of type atomic_unchecked_t
15188+ * @i: required value
15189+ *
15190+ * Atomically sets the value of @v to @i.
15191+ */
15192+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15193+{
15194+ v->counter = i;
15195+}
15196+
15197+/**
15198 * atomic_add - add integer to atomic variable
15199 * @i: integer value to add
15200 * @v: pointer of type atomic_t
15201@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15202 */
15203 static inline void atomic_add(int i, atomic_t *v)
15204 {
15205- asm volatile(LOCK_PREFIX "addl %1,%0"
15206+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15207+
15208+#ifdef CONFIG_PAX_REFCOUNT
15209+ "jno 0f\n"
15210+ LOCK_PREFIX "subl %1,%0\n"
15211+ "int $4\n0:\n"
15212+ _ASM_EXTABLE(0b, 0b)
15213+#endif
15214+
15215+ : "+m" (v->counter)
15216+ : "ir" (i));
15217+}
15218+
15219+/**
15220+ * atomic_add_unchecked - add integer to atomic variable
15221+ * @i: integer value to add
15222+ * @v: pointer of type atomic_unchecked_t
15223+ *
15224+ * Atomically adds @i to @v.
15225+ */
15226+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15227+{
15228+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15229 : "+m" (v->counter)
15230 : "ir" (i));
15231 }
15232@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15233 */
15234 static inline void atomic_sub(int i, atomic_t *v)
15235 {
15236- asm volatile(LOCK_PREFIX "subl %1,%0"
15237+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15238+
15239+#ifdef CONFIG_PAX_REFCOUNT
15240+ "jno 0f\n"
15241+ LOCK_PREFIX "addl %1,%0\n"
15242+ "int $4\n0:\n"
15243+ _ASM_EXTABLE(0b, 0b)
15244+#endif
15245+
15246+ : "+m" (v->counter)
15247+ : "ir" (i));
15248+}
15249+
15250+/**
15251+ * atomic_sub_unchecked - subtract integer from atomic variable
15252+ * @i: integer value to subtract
15253+ * @v: pointer of type atomic_unchecked_t
15254+ *
15255+ * Atomically subtracts @i from @v.
15256+ */
15257+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15258+{
15259+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15260 : "+m" (v->counter)
15261 : "ir" (i));
15262 }
15263@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15264 */
15265 static inline int atomic_sub_and_test(int i, atomic_t *v)
15266 {
15267- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15268+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15269 }
15270
15271 /**
15272@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15273 */
15274 static inline void atomic_inc(atomic_t *v)
15275 {
15276- asm volatile(LOCK_PREFIX "incl %0"
15277+ asm volatile(LOCK_PREFIX "incl %0\n"
15278+
15279+#ifdef CONFIG_PAX_REFCOUNT
15280+ "jno 0f\n"
15281+ LOCK_PREFIX "decl %0\n"
15282+ "int $4\n0:\n"
15283+ _ASM_EXTABLE(0b, 0b)
15284+#endif
15285+
15286+ : "+m" (v->counter));
15287+}
15288+
15289+/**
15290+ * atomic_inc_unchecked - increment atomic variable
15291+ * @v: pointer of type atomic_unchecked_t
15292+ *
15293+ * Atomically increments @v by 1.
15294+ */
15295+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15296+{
15297+ asm volatile(LOCK_PREFIX "incl %0\n"
15298 : "+m" (v->counter));
15299 }
15300
15301@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15302 */
15303 static inline void atomic_dec(atomic_t *v)
15304 {
15305- asm volatile(LOCK_PREFIX "decl %0"
15306+ asm volatile(LOCK_PREFIX "decl %0\n"
15307+
15308+#ifdef CONFIG_PAX_REFCOUNT
15309+ "jno 0f\n"
15310+ LOCK_PREFIX "incl %0\n"
15311+ "int $4\n0:\n"
15312+ _ASM_EXTABLE(0b, 0b)
15313+#endif
15314+
15315+ : "+m" (v->counter));
15316+}
15317+
15318+/**
15319+ * atomic_dec_unchecked - decrement atomic variable
15320+ * @v: pointer of type atomic_unchecked_t
15321+ *
15322+ * Atomically decrements @v by 1.
15323+ */
15324+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15325+{
15326+ asm volatile(LOCK_PREFIX "decl %0\n"
15327 : "+m" (v->counter));
15328 }
15329
15330@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15331 */
15332 static inline int atomic_dec_and_test(atomic_t *v)
15333 {
15334- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15335+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15336 }
15337
15338 /**
15339@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15340 */
15341 static inline int atomic_inc_and_test(atomic_t *v)
15342 {
15343- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15344+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15345+}
15346+
15347+/**
15348+ * atomic_inc_and_test_unchecked - increment and test
15349+ * @v: pointer of type atomic_unchecked_t
15350+ *
15351+ * Atomically increments @v by 1
15352+ * and returns true if the result is zero, or false for all
15353+ * other cases.
15354+ */
15355+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15356+{
15357+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15358 }
15359
15360 /**
15361@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15362 */
15363 static inline int atomic_add_negative(int i, atomic_t *v)
15364 {
15365- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15366+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15367 }
15368
15369 /**
15370@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15371 *
15372 * Atomically adds @i to @v and returns @i + @v
15373 */
15374-static inline int atomic_add_return(int i, atomic_t *v)
15375+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15376+{
15377+ return i + xadd_check_overflow(&v->counter, i);
15378+}
15379+
15380+/**
15381+ * atomic_add_return_unchecked - add integer and return
15382+ * @i: integer value to add
15383+ * @v: pointer of type atomic_unchecked_t
15384+ *
15385+ * Atomically adds @i to @v and returns @i + @v
15386+ */
15387+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15388 {
15389 return i + xadd(&v->counter, i);
15390 }
15391@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15392 *
15393 * Atomically subtracts @i from @v and returns @v - @i
15394 */
15395-static inline int atomic_sub_return(int i, atomic_t *v)
15396+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15397 {
15398 return atomic_add_return(-i, v);
15399 }
15400
15401 #define atomic_inc_return(v) (atomic_add_return(1, v))
15402+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15403+{
15404+ return atomic_add_return_unchecked(1, v);
15405+}
15406 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15407
15408-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15409+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15410+{
15411+ return cmpxchg(&v->counter, old, new);
15412+}
15413+
15414+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15415 {
15416 return cmpxchg(&v->counter, old, new);
15417 }
15418@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15419 return xchg(&v->counter, new);
15420 }
15421
15422+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15423+{
15424+ return xchg(&v->counter, new);
15425+}
15426+
15427 /**
15428 * __atomic_add_unless - add unless the number is already a given value
15429 * @v: pointer of type atomic_t
15430@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15431 */
15432 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15433 {
15434- int c, old;
15435+ int c, old, new;
15436 c = atomic_read(v);
15437 for (;;) {
15438- if (unlikely(c == (u)))
15439+ if (unlikely(c == u))
15440 break;
15441- old = atomic_cmpxchg((v), c, c + (a));
15442+
15443+ asm volatile("addl %2,%0\n"
15444+
15445+#ifdef CONFIG_PAX_REFCOUNT
15446+ "jno 0f\n"
15447+ "subl %2,%0\n"
15448+ "int $4\n0:\n"
15449+ _ASM_EXTABLE(0b, 0b)
15450+#endif
15451+
15452+ : "=r" (new)
15453+ : "0" (c), "ir" (a));
15454+
15455+ old = atomic_cmpxchg(v, c, new);
15456 if (likely(old == c))
15457 break;
15458 c = old;
15459@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15460 }
15461
15462 /**
15463+ * atomic_inc_not_zero_hint - increment if not null
15464+ * @v: pointer of type atomic_t
15465+ * @hint: probable value of the atomic before the increment
15466+ *
15467+ * This version of atomic_inc_not_zero() gives a hint of probable
15468+ * value of the atomic. This helps processor to not read the memory
15469+ * before doing the atomic read/modify/write cycle, lowering
15470+ * number of bus transactions on some arches.
15471+ *
15472+ * Returns: 0 if increment was not done, 1 otherwise.
15473+ */
15474+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15475+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15476+{
15477+ int val, c = hint, new;
15478+
15479+ /* sanity test, should be removed by compiler if hint is a constant */
15480+ if (!hint)
15481+ return __atomic_add_unless(v, 1, 0);
15482+
15483+ do {
15484+ asm volatile("incl %0\n"
15485+
15486+#ifdef CONFIG_PAX_REFCOUNT
15487+ "jno 0f\n"
15488+ "decl %0\n"
15489+ "int $4\n0:\n"
15490+ _ASM_EXTABLE(0b, 0b)
15491+#endif
15492+
15493+ : "=r" (new)
15494+ : "0" (c));
15495+
15496+ val = atomic_cmpxchg(v, c, new);
15497+ if (val == c)
15498+ return 1;
15499+ c = val;
15500+ } while (c);
15501+
15502+ return 0;
15503+}
15504+
15505+/**
15506 * atomic_inc_short - increment of a short integer
15507 * @v: pointer to type int
15508 *
15509@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15510 }
15511
15512 /* These are x86-specific, used by some header files */
15513-#define atomic_clear_mask(mask, addr) \
15514- asm volatile(LOCK_PREFIX "andl %0,%1" \
15515- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15516+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15517+{
15518+ asm volatile(LOCK_PREFIX "andl %1,%0"
15519+ : "+m" (v->counter)
15520+ : "r" (~(mask))
15521+ : "memory");
15522+}
15523
15524-#define atomic_set_mask(mask, addr) \
15525- asm volatile(LOCK_PREFIX "orl %0,%1" \
15526- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15527- : "memory")
15528+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15529+{
15530+ asm volatile(LOCK_PREFIX "andl %1,%0"
15531+ : "+m" (v->counter)
15532+ : "r" (~(mask))
15533+ : "memory");
15534+}
15535+
15536+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15537+{
15538+ asm volatile(LOCK_PREFIX "orl %1,%0"
15539+ : "+m" (v->counter)
15540+ : "r" (mask)
15541+ : "memory");
15542+}
15543+
15544+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15545+{
15546+ asm volatile(LOCK_PREFIX "orl %1,%0"
15547+ : "+m" (v->counter)
15548+ : "r" (mask)
15549+ : "memory");
15550+}
15551
15552 #ifdef CONFIG_X86_32
15553 # include <asm/atomic64_32.h>
15554diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15555index b154de7..bf18a5a 100644
15556--- a/arch/x86/include/asm/atomic64_32.h
15557+++ b/arch/x86/include/asm/atomic64_32.h
15558@@ -12,6 +12,14 @@ typedef struct {
15559 u64 __aligned(8) counter;
15560 } atomic64_t;
15561
15562+#ifdef CONFIG_PAX_REFCOUNT
15563+typedef struct {
15564+ u64 __aligned(8) counter;
15565+} atomic64_unchecked_t;
15566+#else
15567+typedef atomic64_t atomic64_unchecked_t;
15568+#endif
15569+
15570 #define ATOMIC64_INIT(val) { (val) }
15571
15572 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15573@@ -37,21 +45,31 @@ typedef struct {
15574 ATOMIC64_DECL_ONE(sym##_386)
15575
15576 ATOMIC64_DECL_ONE(add_386);
15577+ATOMIC64_DECL_ONE(add_unchecked_386);
15578 ATOMIC64_DECL_ONE(sub_386);
15579+ATOMIC64_DECL_ONE(sub_unchecked_386);
15580 ATOMIC64_DECL_ONE(inc_386);
15581+ATOMIC64_DECL_ONE(inc_unchecked_386);
15582 ATOMIC64_DECL_ONE(dec_386);
15583+ATOMIC64_DECL_ONE(dec_unchecked_386);
15584 #endif
15585
15586 #define alternative_atomic64(f, out, in...) \
15587 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15588
15589 ATOMIC64_DECL(read);
15590+ATOMIC64_DECL(read_unchecked);
15591 ATOMIC64_DECL(set);
15592+ATOMIC64_DECL(set_unchecked);
15593 ATOMIC64_DECL(xchg);
15594 ATOMIC64_DECL(add_return);
15595+ATOMIC64_DECL(add_return_unchecked);
15596 ATOMIC64_DECL(sub_return);
15597+ATOMIC64_DECL(sub_return_unchecked);
15598 ATOMIC64_DECL(inc_return);
15599+ATOMIC64_DECL(inc_return_unchecked);
15600 ATOMIC64_DECL(dec_return);
15601+ATOMIC64_DECL(dec_return_unchecked);
15602 ATOMIC64_DECL(dec_if_positive);
15603 ATOMIC64_DECL(inc_not_zero);
15604 ATOMIC64_DECL(add_unless);
15605@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15606 }
15607
15608 /**
15609+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15610+ * @p: pointer to type atomic64_unchecked_t
15611+ * @o: expected value
15612+ * @n: new value
15613+ *
15614+ * Atomically sets @v to @n if it was equal to @o and returns
15615+ * the old value.
15616+ */
15617+
15618+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15619+{
15620+ return cmpxchg64(&v->counter, o, n);
15621+}
15622+
15623+/**
15624 * atomic64_xchg - xchg atomic64 variable
15625 * @v: pointer to type atomic64_t
15626 * @n: value to assign
15627@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15628 }
15629
15630 /**
15631+ * atomic64_set_unchecked - set atomic64 variable
15632+ * @v: pointer to type atomic64_unchecked_t
15633+ * @n: value to assign
15634+ *
15635+ * Atomically sets the value of @v to @n.
15636+ */
15637+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15638+{
15639+ unsigned high = (unsigned)(i >> 32);
15640+ unsigned low = (unsigned)i;
15641+ alternative_atomic64(set, /* no output */,
15642+ "S" (v), "b" (low), "c" (high)
15643+ : "eax", "edx", "memory");
15644+}
15645+
15646+/**
15647 * atomic64_read - read atomic64 variable
15648 * @v: pointer to type atomic64_t
15649 *
15650@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15651 }
15652
15653 /**
15654+ * atomic64_read_unchecked - read atomic64 variable
15655+ * @v: pointer to type atomic64_unchecked_t
15656+ *
15657+ * Atomically reads the value of @v and returns it.
15658+ */
15659+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15660+{
15661+ long long r;
15662+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15663+ return r;
15664+ }
15665+
15666+/**
15667 * atomic64_add_return - add and return
15668 * @i: integer value to add
15669 * @v: pointer to type atomic64_t
15670@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15671 return i;
15672 }
15673
15674+/**
15675+ * atomic64_add_return_unchecked - add and return
15676+ * @i: integer value to add
15677+ * @v: pointer to type atomic64_unchecked_t
15678+ *
15679+ * Atomically adds @i to @v and returns @i + *@v
15680+ */
15681+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15682+{
15683+ alternative_atomic64(add_return_unchecked,
15684+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15685+ ASM_NO_INPUT_CLOBBER("memory"));
15686+ return i;
15687+}
15688+
15689 /*
15690 * Other variants with different arithmetic operators:
15691 */
15692@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15693 return a;
15694 }
15695
15696+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15697+{
15698+ long long a;
15699+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15700+ "S" (v) : "memory", "ecx");
15701+ return a;
15702+}
15703+
15704 static inline long long atomic64_dec_return(atomic64_t *v)
15705 {
15706 long long a;
15707@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15708 }
15709
15710 /**
15711+ * atomic64_add_unchecked - add integer to atomic64 variable
15712+ * @i: integer value to add
15713+ * @v: pointer to type atomic64_unchecked_t
15714+ *
15715+ * Atomically adds @i to @v.
15716+ */
15717+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15718+{
15719+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15720+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15721+ ASM_NO_INPUT_CLOBBER("memory"));
15722+ return i;
15723+}
15724+
15725+/**
15726 * atomic64_sub - subtract the atomic64 variable
15727 * @i: integer value to subtract
15728 * @v: pointer to type atomic64_t
15729diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15730index f8d273e..02f39f3 100644
15731--- a/arch/x86/include/asm/atomic64_64.h
15732+++ b/arch/x86/include/asm/atomic64_64.h
15733@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15734 }
15735
15736 /**
15737+ * atomic64_read_unchecked - read atomic64 variable
15738+ * @v: pointer of type atomic64_unchecked_t
15739+ *
15740+ * Atomically reads the value of @v.
15741+ * Doesn't imply a read memory barrier.
15742+ */
15743+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15744+{
15745+ return ACCESS_ONCE((v)->counter);
15746+}
15747+
15748+/**
15749 * atomic64_set - set atomic64 variable
15750 * @v: pointer to type atomic64_t
15751 * @i: required value
15752@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15753 }
15754
15755 /**
15756+ * atomic64_set_unchecked - set atomic64 variable
15757+ * @v: pointer to type atomic64_unchecked_t
15758+ * @i: required value
15759+ *
15760+ * Atomically sets the value of @v to @i.
15761+ */
15762+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15763+{
15764+ v->counter = i;
15765+}
15766+
15767+/**
15768 * atomic64_add - add integer to atomic64 variable
15769 * @i: integer value to add
15770 * @v: pointer to type atomic64_t
15771@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15772 */
15773 static inline void atomic64_add(long i, atomic64_t *v)
15774 {
15775+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15776+
15777+#ifdef CONFIG_PAX_REFCOUNT
15778+ "jno 0f\n"
15779+ LOCK_PREFIX "subq %1,%0\n"
15780+ "int $4\n0:\n"
15781+ _ASM_EXTABLE(0b, 0b)
15782+#endif
15783+
15784+ : "=m" (v->counter)
15785+ : "er" (i), "m" (v->counter));
15786+}
15787+
15788+/**
15789+ * atomic64_add_unchecked - add integer to atomic64 variable
15790+ * @i: integer value to add
15791+ * @v: pointer to type atomic64_unchecked_t
15792+ *
15793+ * Atomically adds @i to @v.
15794+ */
15795+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15796+{
15797 asm volatile(LOCK_PREFIX "addq %1,%0"
15798 : "=m" (v->counter)
15799 : "er" (i), "m" (v->counter));
15800@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15801 */
15802 static inline void atomic64_sub(long i, atomic64_t *v)
15803 {
15804- asm volatile(LOCK_PREFIX "subq %1,%0"
15805+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15806+
15807+#ifdef CONFIG_PAX_REFCOUNT
15808+ "jno 0f\n"
15809+ LOCK_PREFIX "addq %1,%0\n"
15810+ "int $4\n0:\n"
15811+ _ASM_EXTABLE(0b, 0b)
15812+#endif
15813+
15814+ : "=m" (v->counter)
15815+ : "er" (i), "m" (v->counter));
15816+}
15817+
15818+/**
15819+ * atomic64_sub_unchecked - subtract the atomic64 variable
15820+ * @i: integer value to subtract
15821+ * @v: pointer to type atomic64_unchecked_t
15822+ *
15823+ * Atomically subtracts @i from @v.
15824+ */
15825+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15826+{
15827+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15828 : "=m" (v->counter)
15829 : "er" (i), "m" (v->counter));
15830 }
15831@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15832 */
15833 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15834 {
15835- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15836+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15837 }
15838
15839 /**
15840@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15841 */
15842 static inline void atomic64_inc(atomic64_t *v)
15843 {
15844+ asm volatile(LOCK_PREFIX "incq %0\n"
15845+
15846+#ifdef CONFIG_PAX_REFCOUNT
15847+ "jno 0f\n"
15848+ LOCK_PREFIX "decq %0\n"
15849+ "int $4\n0:\n"
15850+ _ASM_EXTABLE(0b, 0b)
15851+#endif
15852+
15853+ : "=m" (v->counter)
15854+ : "m" (v->counter));
15855+}
15856+
15857+/**
15858+ * atomic64_inc_unchecked - increment atomic64 variable
15859+ * @v: pointer to type atomic64_unchecked_t
15860+ *
15861+ * Atomically increments @v by 1.
15862+ */
15863+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15864+{
15865 asm volatile(LOCK_PREFIX "incq %0"
15866 : "=m" (v->counter)
15867 : "m" (v->counter));
15868@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15869 */
15870 static inline void atomic64_dec(atomic64_t *v)
15871 {
15872- asm volatile(LOCK_PREFIX "decq %0"
15873+ asm volatile(LOCK_PREFIX "decq %0\n"
15874+
15875+#ifdef CONFIG_PAX_REFCOUNT
15876+ "jno 0f\n"
15877+ LOCK_PREFIX "incq %0\n"
15878+ "int $4\n0:\n"
15879+ _ASM_EXTABLE(0b, 0b)
15880+#endif
15881+
15882+ : "=m" (v->counter)
15883+ : "m" (v->counter));
15884+}
15885+
15886+/**
15887+ * atomic64_dec_unchecked - decrement atomic64 variable
15888+ * @v: pointer to type atomic64_t
15889+ *
15890+ * Atomically decrements @v by 1.
15891+ */
15892+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15893+{
15894+ asm volatile(LOCK_PREFIX "decq %0\n"
15895 : "=m" (v->counter)
15896 : "m" (v->counter));
15897 }
15898@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15899 */
15900 static inline int atomic64_dec_and_test(atomic64_t *v)
15901 {
15902- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15903+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15904 }
15905
15906 /**
15907@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15908 */
15909 static inline int atomic64_inc_and_test(atomic64_t *v)
15910 {
15911- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15912+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15913 }
15914
15915 /**
15916@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15917 */
15918 static inline int atomic64_add_negative(long i, atomic64_t *v)
15919 {
15920- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15921+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15922 }
15923
15924 /**
15925@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15926 */
15927 static inline long atomic64_add_return(long i, atomic64_t *v)
15928 {
15929+ return i + xadd_check_overflow(&v->counter, i);
15930+}
15931+
15932+/**
15933+ * atomic64_add_return_unchecked - add and return
15934+ * @i: integer value to add
15935+ * @v: pointer to type atomic64_unchecked_t
15936+ *
15937+ * Atomically adds @i to @v and returns @i + @v
15938+ */
15939+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15940+{
15941 return i + xadd(&v->counter, i);
15942 }
15943
15944@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15945 }
15946
15947 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15948+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15949+{
15950+ return atomic64_add_return_unchecked(1, v);
15951+}
15952 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15953
15954 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15955@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15956 return cmpxchg(&v->counter, old, new);
15957 }
15958
15959+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15960+{
15961+ return cmpxchg(&v->counter, old, new);
15962+}
15963+
15964 static inline long atomic64_xchg(atomic64_t *v, long new)
15965 {
15966 return xchg(&v->counter, new);
15967@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15968 */
15969 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15970 {
15971- long c, old;
15972+ long c, old, new;
15973 c = atomic64_read(v);
15974 for (;;) {
15975- if (unlikely(c == (u)))
15976+ if (unlikely(c == u))
15977 break;
15978- old = atomic64_cmpxchg((v), c, c + (a));
15979+
15980+ asm volatile("add %2,%0\n"
15981+
15982+#ifdef CONFIG_PAX_REFCOUNT
15983+ "jno 0f\n"
15984+ "sub %2,%0\n"
15985+ "int $4\n0:\n"
15986+ _ASM_EXTABLE(0b, 0b)
15987+#endif
15988+
15989+ : "=r" (new)
15990+ : "0" (c), "ir" (a));
15991+
15992+ old = atomic64_cmpxchg(v, c, new);
15993 if (likely(old == c))
15994 break;
15995 c = old;
15996 }
15997- return c != (u);
15998+ return c != u;
15999 }
16000
16001 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16002diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16003index 2ab1eb3..1e8cc5d 100644
16004--- a/arch/x86/include/asm/barrier.h
16005+++ b/arch/x86/include/asm/barrier.h
16006@@ -57,7 +57,7 @@
16007 do { \
16008 compiletime_assert_atomic_type(*p); \
16009 smp_mb(); \
16010- ACCESS_ONCE(*p) = (v); \
16011+ ACCESS_ONCE_RW(*p) = (v); \
16012 } while (0)
16013
16014 #define smp_load_acquire(p) \
16015@@ -74,7 +74,7 @@ do { \
16016 do { \
16017 compiletime_assert_atomic_type(*p); \
16018 barrier(); \
16019- ACCESS_ONCE(*p) = (v); \
16020+ ACCESS_ONCE_RW(*p) = (v); \
16021 } while (0)
16022
16023 #define smp_load_acquire(p) \
16024diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16025index cfe3b95..d01b118 100644
16026--- a/arch/x86/include/asm/bitops.h
16027+++ b/arch/x86/include/asm/bitops.h
16028@@ -50,7 +50,7 @@
16029 * a mask operation on a byte.
16030 */
16031 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16032-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16033+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16034 #define CONST_MASK(nr) (1 << ((nr) & 7))
16035
16036 /**
16037@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16038 */
16039 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16040 {
16041- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16042+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16043 }
16044
16045 /**
16046@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16047 */
16048 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16049 {
16050- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16051+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16052 }
16053
16054 /**
16055@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16056 */
16057 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16058 {
16059- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16060+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16061 }
16062
16063 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16064@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16065 *
16066 * Undefined if no bit exists, so code should check against 0 first.
16067 */
16068-static inline unsigned long __ffs(unsigned long word)
16069+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16070 {
16071 asm("rep; bsf %1,%0"
16072 : "=r" (word)
16073@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16074 *
16075 * Undefined if no zero exists, so code should check against ~0UL first.
16076 */
16077-static inline unsigned long ffz(unsigned long word)
16078+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16079 {
16080 asm("rep; bsf %1,%0"
16081 : "=r" (word)
16082@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16083 *
16084 * Undefined if no set bit exists, so code should check against 0 first.
16085 */
16086-static inline unsigned long __fls(unsigned long word)
16087+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16088 {
16089 asm("bsr %1,%0"
16090 : "=r" (word)
16091@@ -434,7 +434,7 @@ static inline int ffs(int x)
16092 * set bit if value is nonzero. The last (most significant) bit is
16093 * at position 32.
16094 */
16095-static inline int fls(int x)
16096+static inline int __intentional_overflow(-1) fls(int x)
16097 {
16098 int r;
16099
16100@@ -476,7 +476,7 @@ static inline int fls(int x)
16101 * at position 64.
16102 */
16103 #ifdef CONFIG_X86_64
16104-static __always_inline int fls64(__u64 x)
16105+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16106 {
16107 int bitpos = -1;
16108 /*
16109diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16110index 4fa687a..60f2d39 100644
16111--- a/arch/x86/include/asm/boot.h
16112+++ b/arch/x86/include/asm/boot.h
16113@@ -6,10 +6,15 @@
16114 #include <uapi/asm/boot.h>
16115
16116 /* Physical address where kernel should be loaded. */
16117-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16118+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16119 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16120 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16121
16122+#ifndef __ASSEMBLY__
16123+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16124+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16125+#endif
16126+
16127 /* Minimum kernel alignment, as a power of two */
16128 #ifdef CONFIG_X86_64
16129 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16130diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16131index 48f99f1..d78ebf9 100644
16132--- a/arch/x86/include/asm/cache.h
16133+++ b/arch/x86/include/asm/cache.h
16134@@ -5,12 +5,13 @@
16135
16136 /* L1 cache line size */
16137 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16138-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16139+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16140
16141 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16142+#define __read_only __attribute__((__section__(".data..read_only")))
16143
16144 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16145-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16146+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16147
16148 #ifdef CONFIG_X86_VSMP
16149 #ifdef CONFIG_SMP
16150diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16151index 1f1297b..72b8439 100644
16152--- a/arch/x86/include/asm/calling.h
16153+++ b/arch/x86/include/asm/calling.h
16154@@ -82,106 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16155 #define RSP 152
16156 #define SS 160
16157
16158-#define ARGOFFSET R11
16159+#define ARGOFFSET R15
16160
16161 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16162- subq $9*8+\addskip, %rsp
16163- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16164- movq_cfi rdi, 8*8
16165- movq_cfi rsi, 7*8
16166- movq_cfi rdx, 6*8
16167+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16168+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16169+ movq_cfi rdi, RDI
16170+ movq_cfi rsi, RSI
16171+ movq_cfi rdx, RDX
16172
16173 .if \save_rcx
16174- movq_cfi rcx, 5*8
16175+ movq_cfi rcx, RCX
16176 .endif
16177
16178 .if \rax_enosys
16179- movq $-ENOSYS, 4*8(%rsp)
16180+ movq $-ENOSYS, RAX(%rsp)
16181 .else
16182- movq_cfi rax, 4*8
16183+ movq_cfi rax, RAX
16184 .endif
16185
16186 .if \save_r891011
16187- movq_cfi r8, 3*8
16188- movq_cfi r9, 2*8
16189- movq_cfi r10, 1*8
16190- movq_cfi r11, 0*8
16191+ movq_cfi r8, R8
16192+ movq_cfi r9, R9
16193+ movq_cfi r10, R10
16194+ movq_cfi r11, R11
16195 .endif
16196
16197+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16198+ movq_cfi r12, R12
16199+#endif
16200+
16201 .endm
16202
16203-#define ARG_SKIP (9*8)
16204+#define ARG_SKIP ORIG_RAX
16205
16206 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16207 rstor_r8910=1, rstor_rdx=1
16208+
16209+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16210+ movq_cfi_restore R12, r12
16211+#endif
16212+
16213 .if \rstor_r11
16214- movq_cfi_restore 0*8, r11
16215+ movq_cfi_restore R11, r11
16216 .endif
16217
16218 .if \rstor_r8910
16219- movq_cfi_restore 1*8, r10
16220- movq_cfi_restore 2*8, r9
16221- movq_cfi_restore 3*8, r8
16222+ movq_cfi_restore R10, r10
16223+ movq_cfi_restore R9, r9
16224+ movq_cfi_restore R8, r8
16225 .endif
16226
16227 .if \rstor_rax
16228- movq_cfi_restore 4*8, rax
16229+ movq_cfi_restore RAX, rax
16230 .endif
16231
16232 .if \rstor_rcx
16233- movq_cfi_restore 5*8, rcx
16234+ movq_cfi_restore RCX, rcx
16235 .endif
16236
16237 .if \rstor_rdx
16238- movq_cfi_restore 6*8, rdx
16239+ movq_cfi_restore RDX, rdx
16240 .endif
16241
16242- movq_cfi_restore 7*8, rsi
16243- movq_cfi_restore 8*8, rdi
16244+ movq_cfi_restore RSI, rsi
16245+ movq_cfi_restore RDI, rdi
16246
16247- .if ARG_SKIP+\addskip > 0
16248- addq $ARG_SKIP+\addskip, %rsp
16249- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16250+ .if ORIG_RAX+\addskip > 0
16251+ addq $ORIG_RAX+\addskip, %rsp
16252+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16253 .endif
16254 .endm
16255
16256- .macro LOAD_ARGS offset, skiprax=0
16257- movq \offset(%rsp), %r11
16258- movq \offset+8(%rsp), %r10
16259- movq \offset+16(%rsp), %r9
16260- movq \offset+24(%rsp), %r8
16261- movq \offset+40(%rsp), %rcx
16262- movq \offset+48(%rsp), %rdx
16263- movq \offset+56(%rsp), %rsi
16264- movq \offset+64(%rsp), %rdi
16265+ .macro LOAD_ARGS skiprax=0
16266+ movq R11(%rsp), %r11
16267+ movq R10(%rsp), %r10
16268+ movq R9(%rsp), %r9
16269+ movq R8(%rsp), %r8
16270+ movq RCX(%rsp), %rcx
16271+ movq RDX(%rsp), %rdx
16272+ movq RSI(%rsp), %rsi
16273+ movq RDI(%rsp), %rdi
16274 .if \skiprax
16275 .else
16276- movq \offset+72(%rsp), %rax
16277+ movq ORIG_RAX(%rsp), %rax
16278 .endif
16279 .endm
16280
16281-#define REST_SKIP (6*8)
16282-
16283 .macro SAVE_REST
16284- subq $REST_SKIP, %rsp
16285- CFI_ADJUST_CFA_OFFSET REST_SKIP
16286- movq_cfi rbx, 5*8
16287- movq_cfi rbp, 4*8
16288- movq_cfi r12, 3*8
16289- movq_cfi r13, 2*8
16290- movq_cfi r14, 1*8
16291- movq_cfi r15, 0*8
16292+ movq_cfi rbx, RBX
16293+ movq_cfi rbp, RBP
16294+
16295+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16296+ movq_cfi r12, R12
16297+#endif
16298+
16299+ movq_cfi r13, R13
16300+ movq_cfi r14, R14
16301+ movq_cfi r15, R15
16302 .endm
16303
16304 .macro RESTORE_REST
16305- movq_cfi_restore 0*8, r15
16306- movq_cfi_restore 1*8, r14
16307- movq_cfi_restore 2*8, r13
16308- movq_cfi_restore 3*8, r12
16309- movq_cfi_restore 4*8, rbp
16310- movq_cfi_restore 5*8, rbx
16311- addq $REST_SKIP, %rsp
16312- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16313+ movq_cfi_restore R15, r15
16314+ movq_cfi_restore R14, r14
16315+ movq_cfi_restore R13, r13
16316+
16317+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16318+ movq_cfi_restore R12, r12
16319+#endif
16320+
16321+ movq_cfi_restore RBP, rbp
16322+ movq_cfi_restore RBX, rbx
16323 .endm
16324
16325 .macro SAVE_ALL
16326diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16327index f50de69..2b0a458 100644
16328--- a/arch/x86/include/asm/checksum_32.h
16329+++ b/arch/x86/include/asm/checksum_32.h
16330@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16331 int len, __wsum sum,
16332 int *src_err_ptr, int *dst_err_ptr);
16333
16334+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16335+ int len, __wsum sum,
16336+ int *src_err_ptr, int *dst_err_ptr);
16337+
16338+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16339+ int len, __wsum sum,
16340+ int *src_err_ptr, int *dst_err_ptr);
16341+
16342 /*
16343 * Note: when you get a NULL pointer exception here this means someone
16344 * passed in an incorrect kernel address to one of these functions.
16345@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16346
16347 might_sleep();
16348 stac();
16349- ret = csum_partial_copy_generic((__force void *)src, dst,
16350+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16351 len, sum, err_ptr, NULL);
16352 clac();
16353
16354@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16355 might_sleep();
16356 if (access_ok(VERIFY_WRITE, dst, len)) {
16357 stac();
16358- ret = csum_partial_copy_generic(src, (__force void *)dst,
16359+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16360 len, sum, NULL, err_ptr);
16361 clac();
16362 return ret;
16363diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16364index 99c105d7..2f667ac 100644
16365--- a/arch/x86/include/asm/cmpxchg.h
16366+++ b/arch/x86/include/asm/cmpxchg.h
16367@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16368 __compiletime_error("Bad argument size for cmpxchg");
16369 extern void __xadd_wrong_size(void)
16370 __compiletime_error("Bad argument size for xadd");
16371+extern void __xadd_check_overflow_wrong_size(void)
16372+ __compiletime_error("Bad argument size for xadd_check_overflow");
16373 extern void __add_wrong_size(void)
16374 __compiletime_error("Bad argument size for add");
16375+extern void __add_check_overflow_wrong_size(void)
16376+ __compiletime_error("Bad argument size for add_check_overflow");
16377
16378 /*
16379 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16380@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16381 __ret; \
16382 })
16383
16384+#ifdef CONFIG_PAX_REFCOUNT
16385+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16386+ ({ \
16387+ __typeof__ (*(ptr)) __ret = (arg); \
16388+ switch (sizeof(*(ptr))) { \
16389+ case __X86_CASE_L: \
16390+ asm volatile (lock #op "l %0, %1\n" \
16391+ "jno 0f\n" \
16392+ "mov %0,%1\n" \
16393+ "int $4\n0:\n" \
16394+ _ASM_EXTABLE(0b, 0b) \
16395+ : "+r" (__ret), "+m" (*(ptr)) \
16396+ : : "memory", "cc"); \
16397+ break; \
16398+ case __X86_CASE_Q: \
16399+ asm volatile (lock #op "q %q0, %1\n" \
16400+ "jno 0f\n" \
16401+ "mov %0,%1\n" \
16402+ "int $4\n0:\n" \
16403+ _ASM_EXTABLE(0b, 0b) \
16404+ : "+r" (__ret), "+m" (*(ptr)) \
16405+ : : "memory", "cc"); \
16406+ break; \
16407+ default: \
16408+ __ ## op ## _check_overflow_wrong_size(); \
16409+ } \
16410+ __ret; \
16411+ })
16412+#else
16413+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16414+#endif
16415+
16416 /*
16417 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16418 * Since this is generally used to protect other memory information, we
16419@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16420 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16421 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16422
16423+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16424+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16425+
16426 #define __add(ptr, inc, lock) \
16427 ({ \
16428 __typeof__ (*(ptr)) __ret = (inc); \
16429diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16430index 59c6c40..5e0b22c 100644
16431--- a/arch/x86/include/asm/compat.h
16432+++ b/arch/x86/include/asm/compat.h
16433@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16434 typedef u32 compat_uint_t;
16435 typedef u32 compat_ulong_t;
16436 typedef u64 __attribute__((aligned(4))) compat_u64;
16437-typedef u32 compat_uptr_t;
16438+typedef u32 __user compat_uptr_t;
16439
16440 struct compat_timespec {
16441 compat_time_t tv_sec;
16442diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16443index 90a5485..43b6211 100644
16444--- a/arch/x86/include/asm/cpufeature.h
16445+++ b/arch/x86/include/asm/cpufeature.h
16446@@ -213,7 +213,7 @@
16447 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16448 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16449 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16450-
16451+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16452
16453 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16454 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16455@@ -221,7 +221,7 @@
16456 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16457 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16458 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16459-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16460+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16461 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16462 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16463 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16464@@ -390,6 +390,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16465 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16466 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16467 #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
16468+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16469
16470 #if __GNUC__ >= 4
16471 extern void warn_pre_alternatives(void);
16472@@ -441,7 +442,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16473
16474 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16475 t_warn:
16476- warn_pre_alternatives();
16477+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16478+ warn_pre_alternatives();
16479 return false;
16480 #endif
16481
16482@@ -461,7 +463,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16483 ".section .discard,\"aw\",@progbits\n"
16484 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16485 ".previous\n"
16486- ".section .altinstr_replacement,\"ax\"\n"
16487+ ".section .altinstr_replacement,\"a\"\n"
16488 "3: movb $1,%0\n"
16489 "4:\n"
16490 ".previous\n"
16491@@ -498,7 +500,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16492 " .byte 2b - 1b\n" /* src len */
16493 " .byte 4f - 3f\n" /* repl len */
16494 ".previous\n"
16495- ".section .altinstr_replacement,\"ax\"\n"
16496+ ".section .altinstr_replacement,\"a\"\n"
16497 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16498 "4:\n"
16499 ".previous\n"
16500@@ -531,7 +533,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16501 ".section .discard,\"aw\",@progbits\n"
16502 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16503 ".previous\n"
16504- ".section .altinstr_replacement,\"ax\"\n"
16505+ ".section .altinstr_replacement,\"a\"\n"
16506 "3: movb $0,%0\n"
16507 "4:\n"
16508 ".previous\n"
16509@@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16510 ".section .discard,\"aw\",@progbits\n"
16511 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16512 ".previous\n"
16513- ".section .altinstr_replacement,\"ax\"\n"
16514+ ".section .altinstr_replacement,\"a\"\n"
16515 "5: movb $1,%0\n"
16516 "6:\n"
16517 ".previous\n"
16518diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16519index a94b82e..59ecefa 100644
16520--- a/arch/x86/include/asm/desc.h
16521+++ b/arch/x86/include/asm/desc.h
16522@@ -4,6 +4,7 @@
16523 #include <asm/desc_defs.h>
16524 #include <asm/ldt.h>
16525 #include <asm/mmu.h>
16526+#include <asm/pgtable.h>
16527
16528 #include <linux/smp.h>
16529 #include <linux/percpu.h>
16530@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16531
16532 desc->type = (info->read_exec_only ^ 1) << 1;
16533 desc->type |= info->contents << 2;
16534+ desc->type |= info->seg_not_present ^ 1;
16535
16536 desc->s = 1;
16537 desc->dpl = 0x3;
16538@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16539 }
16540
16541 extern struct desc_ptr idt_descr;
16542-extern gate_desc idt_table[];
16543-extern struct desc_ptr debug_idt_descr;
16544-extern gate_desc debug_idt_table[];
16545-
16546-struct gdt_page {
16547- struct desc_struct gdt[GDT_ENTRIES];
16548-} __attribute__((aligned(PAGE_SIZE)));
16549-
16550-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16551+extern gate_desc idt_table[IDT_ENTRIES];
16552+extern const struct desc_ptr debug_idt_descr;
16553+extern gate_desc debug_idt_table[IDT_ENTRIES];
16554
16555+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16556 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16557 {
16558- return per_cpu(gdt_page, cpu).gdt;
16559+ return cpu_gdt_table[cpu];
16560 }
16561
16562 #ifdef CONFIG_X86_64
16563@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16564 unsigned long base, unsigned dpl, unsigned flags,
16565 unsigned short seg)
16566 {
16567- gate->a = (seg << 16) | (base & 0xffff);
16568- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16569+ gate->gate.offset_low = base;
16570+ gate->gate.seg = seg;
16571+ gate->gate.reserved = 0;
16572+ gate->gate.type = type;
16573+ gate->gate.s = 0;
16574+ gate->gate.dpl = dpl;
16575+ gate->gate.p = 1;
16576+ gate->gate.offset_high = base >> 16;
16577 }
16578
16579 #endif
16580@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16581
16582 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16583 {
16584+ pax_open_kernel();
16585 memcpy(&idt[entry], gate, sizeof(*gate));
16586+ pax_close_kernel();
16587 }
16588
16589 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16590 {
16591+ pax_open_kernel();
16592 memcpy(&ldt[entry], desc, 8);
16593+ pax_close_kernel();
16594 }
16595
16596 static inline void
16597@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16598 default: size = sizeof(*gdt); break;
16599 }
16600
16601+ pax_open_kernel();
16602 memcpy(&gdt[entry], desc, size);
16603+ pax_close_kernel();
16604 }
16605
16606 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16607@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16608
16609 static inline void native_load_tr_desc(void)
16610 {
16611+ pax_open_kernel();
16612 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16613+ pax_close_kernel();
16614 }
16615
16616 static inline void native_load_gdt(const struct desc_ptr *dtr)
16617@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16618 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16619 unsigned int i;
16620
16621+ pax_open_kernel();
16622 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16623 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16624+ pax_close_kernel();
16625 }
16626
16627 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16628@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16629 preempt_enable();
16630 }
16631
16632-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16633+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16634 {
16635 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16636 }
16637@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16638 }
16639
16640 #ifdef CONFIG_X86_64
16641-static inline void set_nmi_gate(int gate, void *addr)
16642+static inline void set_nmi_gate(int gate, const void *addr)
16643 {
16644 gate_desc s;
16645
16646@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16647 #endif
16648
16649 #ifdef CONFIG_TRACING
16650-extern struct desc_ptr trace_idt_descr;
16651-extern gate_desc trace_idt_table[];
16652+extern const struct desc_ptr trace_idt_descr;
16653+extern gate_desc trace_idt_table[IDT_ENTRIES];
16654 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16655 {
16656 write_idt_entry(trace_idt_table, entry, gate);
16657 }
16658
16659-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16660+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16661 unsigned dpl, unsigned ist, unsigned seg)
16662 {
16663 gate_desc s;
16664@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16665 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16666 #endif
16667
16668-static inline void _set_gate(int gate, unsigned type, void *addr,
16669+static inline void _set_gate(int gate, unsigned type, const void *addr,
16670 unsigned dpl, unsigned ist, unsigned seg)
16671 {
16672 gate_desc s;
16673@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16674 #define set_intr_gate(n, addr) \
16675 do { \
16676 BUG_ON((unsigned)n > 0xFF); \
16677- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16678+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16679 __KERNEL_CS); \
16680- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16681+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16682 0, 0, __KERNEL_CS); \
16683 } while (0)
16684
16685@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16686 /*
16687 * This routine sets up an interrupt gate at directory privilege level 3.
16688 */
16689-static inline void set_system_intr_gate(unsigned int n, void *addr)
16690+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16691 {
16692 BUG_ON((unsigned)n > 0xFF);
16693 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16694 }
16695
16696-static inline void set_system_trap_gate(unsigned int n, void *addr)
16697+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16698 {
16699 BUG_ON((unsigned)n > 0xFF);
16700 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16701 }
16702
16703-static inline void set_trap_gate(unsigned int n, void *addr)
16704+static inline void set_trap_gate(unsigned int n, const void *addr)
16705 {
16706 BUG_ON((unsigned)n > 0xFF);
16707 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16708@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16709 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16710 {
16711 BUG_ON((unsigned)n > 0xFF);
16712- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16713+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16714 }
16715
16716-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16717+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16718 {
16719 BUG_ON((unsigned)n > 0xFF);
16720 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16721 }
16722
16723-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16724+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16725 {
16726 BUG_ON((unsigned)n > 0xFF);
16727 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16728@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16729 else
16730 load_idt((const struct desc_ptr *)&idt_descr);
16731 }
16732+
16733+#ifdef CONFIG_X86_32
16734+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16735+{
16736+ struct desc_struct d;
16737+
16738+ if (likely(limit))
16739+ limit = (limit - 1UL) >> PAGE_SHIFT;
16740+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16741+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16742+}
16743+#endif
16744+
16745 #endif /* _ASM_X86_DESC_H */
16746diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16747index 278441f..b95a174 100644
16748--- a/arch/x86/include/asm/desc_defs.h
16749+++ b/arch/x86/include/asm/desc_defs.h
16750@@ -31,6 +31,12 @@ struct desc_struct {
16751 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16752 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16753 };
16754+ struct {
16755+ u16 offset_low;
16756+ u16 seg;
16757+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16758+ unsigned offset_high: 16;
16759+ } gate;
16760 };
16761 } __attribute__((packed));
16762
16763diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16764index ced283a..ffe04cc 100644
16765--- a/arch/x86/include/asm/div64.h
16766+++ b/arch/x86/include/asm/div64.h
16767@@ -39,7 +39,7 @@
16768 __mod; \
16769 })
16770
16771-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16772+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16773 {
16774 union {
16775 u64 v64;
16776diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16777index ca3347a..1a5082a 100644
16778--- a/arch/x86/include/asm/elf.h
16779+++ b/arch/x86/include/asm/elf.h
16780@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16781
16782 #include <asm/vdso.h>
16783
16784-#ifdef CONFIG_X86_64
16785-extern unsigned int vdso64_enabled;
16786-#endif
16787 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16788 extern unsigned int vdso32_enabled;
16789 #endif
16790@@ -249,7 +246,25 @@ extern int force_personality32;
16791 the loader. We need to make sure that it is out of the way of the program
16792 that it will "exec", and that there is sufficient room for the brk. */
16793
16794+#ifdef CONFIG_PAX_SEGMEXEC
16795+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16796+#else
16797 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16798+#endif
16799+
16800+#ifdef CONFIG_PAX_ASLR
16801+#ifdef CONFIG_X86_32
16802+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16803+
16804+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16805+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16806+#else
16807+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16808+
16809+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16810+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16811+#endif
16812+#endif
16813
16814 /* This yields a mask that user programs can use to figure out what
16815 instruction set this CPU supports. This could be done in user space,
16816@@ -298,17 +313,13 @@ do { \
16817
16818 #define ARCH_DLINFO \
16819 do { \
16820- if (vdso64_enabled) \
16821- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16822- (unsigned long __force)current->mm->context.vdso); \
16823+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16824 } while (0)
16825
16826 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16827 #define ARCH_DLINFO_X32 \
16828 do { \
16829- if (vdso64_enabled) \
16830- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16831- (unsigned long __force)current->mm->context.vdso); \
16832+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16833 } while (0)
16834
16835 #define AT_SYSINFO 32
16836@@ -323,10 +334,10 @@ else \
16837
16838 #endif /* !CONFIG_X86_32 */
16839
16840-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16841+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16842
16843 #define VDSO_ENTRY \
16844- ((unsigned long)current->mm->context.vdso + \
16845+ (current->mm->context.vdso + \
16846 selected_vdso32->sym___kernel_vsyscall)
16847
16848 struct linux_binprm;
16849@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16850 int uses_interp);
16851 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16852
16853-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16854-#define arch_randomize_brk arch_randomize_brk
16855-
16856 /*
16857 * True on X86_32 or when emulating IA32 on X86_64
16858 */
16859diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16860index 77a99ac..39ff7f5 100644
16861--- a/arch/x86/include/asm/emergency-restart.h
16862+++ b/arch/x86/include/asm/emergency-restart.h
16863@@ -1,6 +1,6 @@
16864 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16865 #define _ASM_X86_EMERGENCY_RESTART_H
16866
16867-extern void machine_emergency_restart(void);
16868+extern void machine_emergency_restart(void) __noreturn;
16869
16870 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16871diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16872index 1c7eefe..d0e4702 100644
16873--- a/arch/x86/include/asm/floppy.h
16874+++ b/arch/x86/include/asm/floppy.h
16875@@ -229,18 +229,18 @@ static struct fd_routine_l {
16876 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16877 } fd_routine[] = {
16878 {
16879- request_dma,
16880- free_dma,
16881- get_dma_residue,
16882- dma_mem_alloc,
16883- hard_dma_setup
16884+ ._request_dma = request_dma,
16885+ ._free_dma = free_dma,
16886+ ._get_dma_residue = get_dma_residue,
16887+ ._dma_mem_alloc = dma_mem_alloc,
16888+ ._dma_setup = hard_dma_setup
16889 },
16890 {
16891- vdma_request_dma,
16892- vdma_nop,
16893- vdma_get_dma_residue,
16894- vdma_mem_alloc,
16895- vdma_dma_setup
16896+ ._request_dma = vdma_request_dma,
16897+ ._free_dma = vdma_nop,
16898+ ._get_dma_residue = vdma_get_dma_residue,
16899+ ._dma_mem_alloc = vdma_mem_alloc,
16900+ ._dma_setup = vdma_dma_setup
16901 }
16902 };
16903
16904diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16905index 72ba21a..79f3f66 100644
16906--- a/arch/x86/include/asm/fpu-internal.h
16907+++ b/arch/x86/include/asm/fpu-internal.h
16908@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16909 #define user_insn(insn, output, input...) \
16910 ({ \
16911 int err; \
16912+ pax_open_userland(); \
16913 asm volatile(ASM_STAC "\n" \
16914- "1:" #insn "\n\t" \
16915+ "1:" \
16916+ __copyuser_seg \
16917+ #insn "\n\t" \
16918 "2: " ASM_CLAC "\n" \
16919 ".section .fixup,\"ax\"\n" \
16920 "3: movl $-1,%[err]\n" \
16921@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16922 _ASM_EXTABLE(1b, 3b) \
16923 : [err] "=r" (err), output \
16924 : "0"(0), input); \
16925+ pax_close_userland(); \
16926 err; \
16927 })
16928
16929@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16930 "fnclex\n\t"
16931 "emms\n\t"
16932 "fildl %P[addr]" /* set F?P to defined value */
16933- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16934+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16935 }
16936
16937 return fpu_restore_checking(&tsk->thread.fpu);
16938diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16939index b4c1f54..e290c08 100644
16940--- a/arch/x86/include/asm/futex.h
16941+++ b/arch/x86/include/asm/futex.h
16942@@ -12,6 +12,7 @@
16943 #include <asm/smap.h>
16944
16945 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16946+ typecheck(u32 __user *, uaddr); \
16947 asm volatile("\t" ASM_STAC "\n" \
16948 "1:\t" insn "\n" \
16949 "2:\t" ASM_CLAC "\n" \
16950@@ -20,15 +21,16 @@
16951 "\tjmp\t2b\n" \
16952 "\t.previous\n" \
16953 _ASM_EXTABLE(1b, 3b) \
16954- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16955+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16956 : "i" (-EFAULT), "0" (oparg), "1" (0))
16957
16958 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16959+ typecheck(u32 __user *, uaddr); \
16960 asm volatile("\t" ASM_STAC "\n" \
16961 "1:\tmovl %2, %0\n" \
16962 "\tmovl\t%0, %3\n" \
16963 "\t" insn "\n" \
16964- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16965+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16966 "\tjnz\t1b\n" \
16967 "3:\t" ASM_CLAC "\n" \
16968 "\t.section .fixup,\"ax\"\n" \
16969@@ -38,7 +40,7 @@
16970 _ASM_EXTABLE(1b, 4b) \
16971 _ASM_EXTABLE(2b, 4b) \
16972 : "=&a" (oldval), "=&r" (ret), \
16973- "+m" (*uaddr), "=&r" (tem) \
16974+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16975 : "r" (oparg), "i" (-EFAULT), "1" (0))
16976
16977 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16978@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16979
16980 pagefault_disable();
16981
16982+ pax_open_userland();
16983 switch (op) {
16984 case FUTEX_OP_SET:
16985- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16986+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16987 break;
16988 case FUTEX_OP_ADD:
16989- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16990+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16991 uaddr, oparg);
16992 break;
16993 case FUTEX_OP_OR:
16994@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16995 default:
16996 ret = -ENOSYS;
16997 }
16998+ pax_close_userland();
16999
17000 pagefault_enable();
17001
17002diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17003index 9662290..49ca5e5 100644
17004--- a/arch/x86/include/asm/hw_irq.h
17005+++ b/arch/x86/include/asm/hw_irq.h
17006@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17007 #endif /* CONFIG_X86_LOCAL_APIC */
17008
17009 /* Statistics */
17010-extern atomic_t irq_err_count;
17011-extern atomic_t irq_mis_count;
17012+extern atomic_unchecked_t irq_err_count;
17013+extern atomic_unchecked_t irq_mis_count;
17014
17015 /* EISA */
17016 extern void eisa_set_level_irq(unsigned int irq);
17017diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17018index ccffa53..3c90c87 100644
17019--- a/arch/x86/include/asm/i8259.h
17020+++ b/arch/x86/include/asm/i8259.h
17021@@ -62,7 +62,7 @@ struct legacy_pic {
17022 void (*init)(int auto_eoi);
17023 int (*irq_pending)(unsigned int irq);
17024 void (*make_irq)(unsigned int irq);
17025-};
17026+} __do_const;
17027
17028 extern struct legacy_pic *legacy_pic;
17029 extern struct legacy_pic null_legacy_pic;
17030diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17031index 34a5b93..27e40a6 100644
17032--- a/arch/x86/include/asm/io.h
17033+++ b/arch/x86/include/asm/io.h
17034@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17035 "m" (*(volatile type __force *)addr) barrier); }
17036
17037 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17038-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17039-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17040+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17041+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17042
17043 build_mmio_read(__readb, "b", unsigned char, "=q", )
17044-build_mmio_read(__readw, "w", unsigned short, "=r", )
17045-build_mmio_read(__readl, "l", unsigned int, "=r", )
17046+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17047+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17048
17049 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17050 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17051@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17052 * this function
17053 */
17054
17055-static inline phys_addr_t virt_to_phys(volatile void *address)
17056+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17057 {
17058 return __pa(address);
17059 }
17060@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17061 return ioremap_nocache(offset, size);
17062 }
17063
17064-extern void iounmap(volatile void __iomem *addr);
17065+extern void iounmap(const volatile void __iomem *addr);
17066
17067 extern void set_iounmap_nonlazy(void);
17068
17069@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17070
17071 #include <linux/vmalloc.h>
17072
17073+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17074+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17075+{
17076+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17077+}
17078+
17079+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17080+{
17081+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17082+}
17083+
17084 /*
17085 * Convert a virtual cached pointer to an uncached pointer
17086 */
17087diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17088index 0a8b519..80e7d5b 100644
17089--- a/arch/x86/include/asm/irqflags.h
17090+++ b/arch/x86/include/asm/irqflags.h
17091@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17092 sti; \
17093 sysexit
17094
17095+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17096+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17097+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17098+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17099+
17100 #else
17101 #define INTERRUPT_RETURN iret
17102 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17103diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17104index 4421b5d..8543006 100644
17105--- a/arch/x86/include/asm/kprobes.h
17106+++ b/arch/x86/include/asm/kprobes.h
17107@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17108 #define RELATIVEJUMP_SIZE 5
17109 #define RELATIVECALL_OPCODE 0xe8
17110 #define RELATIVE_ADDR_SIZE 4
17111-#define MAX_STACK_SIZE 64
17112-#define MIN_STACK_SIZE(ADDR) \
17113- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17114- THREAD_SIZE - (unsigned long)(ADDR))) \
17115- ? (MAX_STACK_SIZE) \
17116- : (((unsigned long)current_thread_info()) + \
17117- THREAD_SIZE - (unsigned long)(ADDR)))
17118+#define MAX_STACK_SIZE 64UL
17119+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17120
17121 #define flush_insn_slot(p) do { } while (0)
17122
17123diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17124index 4ad6560..75c7bdd 100644
17125--- a/arch/x86/include/asm/local.h
17126+++ b/arch/x86/include/asm/local.h
17127@@ -10,33 +10,97 @@ typedef struct {
17128 atomic_long_t a;
17129 } local_t;
17130
17131+typedef struct {
17132+ atomic_long_unchecked_t a;
17133+} local_unchecked_t;
17134+
17135 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17136
17137 #define local_read(l) atomic_long_read(&(l)->a)
17138+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17139 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17140+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17141
17142 static inline void local_inc(local_t *l)
17143 {
17144- asm volatile(_ASM_INC "%0"
17145+ asm volatile(_ASM_INC "%0\n"
17146+
17147+#ifdef CONFIG_PAX_REFCOUNT
17148+ "jno 0f\n"
17149+ _ASM_DEC "%0\n"
17150+ "int $4\n0:\n"
17151+ _ASM_EXTABLE(0b, 0b)
17152+#endif
17153+
17154+ : "+m" (l->a.counter));
17155+}
17156+
17157+static inline void local_inc_unchecked(local_unchecked_t *l)
17158+{
17159+ asm volatile(_ASM_INC "%0\n"
17160 : "+m" (l->a.counter));
17161 }
17162
17163 static inline void local_dec(local_t *l)
17164 {
17165- asm volatile(_ASM_DEC "%0"
17166+ asm volatile(_ASM_DEC "%0\n"
17167+
17168+#ifdef CONFIG_PAX_REFCOUNT
17169+ "jno 0f\n"
17170+ _ASM_INC "%0\n"
17171+ "int $4\n0:\n"
17172+ _ASM_EXTABLE(0b, 0b)
17173+#endif
17174+
17175+ : "+m" (l->a.counter));
17176+}
17177+
17178+static inline void local_dec_unchecked(local_unchecked_t *l)
17179+{
17180+ asm volatile(_ASM_DEC "%0\n"
17181 : "+m" (l->a.counter));
17182 }
17183
17184 static inline void local_add(long i, local_t *l)
17185 {
17186- asm volatile(_ASM_ADD "%1,%0"
17187+ asm volatile(_ASM_ADD "%1,%0\n"
17188+
17189+#ifdef CONFIG_PAX_REFCOUNT
17190+ "jno 0f\n"
17191+ _ASM_SUB "%1,%0\n"
17192+ "int $4\n0:\n"
17193+ _ASM_EXTABLE(0b, 0b)
17194+#endif
17195+
17196+ : "+m" (l->a.counter)
17197+ : "ir" (i));
17198+}
17199+
17200+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17201+{
17202+ asm volatile(_ASM_ADD "%1,%0\n"
17203 : "+m" (l->a.counter)
17204 : "ir" (i));
17205 }
17206
17207 static inline void local_sub(long i, local_t *l)
17208 {
17209- asm volatile(_ASM_SUB "%1,%0"
17210+ asm volatile(_ASM_SUB "%1,%0\n"
17211+
17212+#ifdef CONFIG_PAX_REFCOUNT
17213+ "jno 0f\n"
17214+ _ASM_ADD "%1,%0\n"
17215+ "int $4\n0:\n"
17216+ _ASM_EXTABLE(0b, 0b)
17217+#endif
17218+
17219+ : "+m" (l->a.counter)
17220+ : "ir" (i));
17221+}
17222+
17223+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17224+{
17225+ asm volatile(_ASM_SUB "%1,%0\n"
17226 : "+m" (l->a.counter)
17227 : "ir" (i));
17228 }
17229@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17230 */
17231 static inline int local_sub_and_test(long i, local_t *l)
17232 {
17233- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17234+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17235 }
17236
17237 /**
17238@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17239 */
17240 static inline int local_dec_and_test(local_t *l)
17241 {
17242- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17243+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17244 }
17245
17246 /**
17247@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17248 */
17249 static inline int local_inc_and_test(local_t *l)
17250 {
17251- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17252+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17253 }
17254
17255 /**
17256@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17257 */
17258 static inline int local_add_negative(long i, local_t *l)
17259 {
17260- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17261+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17262 }
17263
17264 /**
17265@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17266 static inline long local_add_return(long i, local_t *l)
17267 {
17268 long __i = i;
17269+ asm volatile(_ASM_XADD "%0, %1\n"
17270+
17271+#ifdef CONFIG_PAX_REFCOUNT
17272+ "jno 0f\n"
17273+ _ASM_MOV "%0,%1\n"
17274+ "int $4\n0:\n"
17275+ _ASM_EXTABLE(0b, 0b)
17276+#endif
17277+
17278+ : "+r" (i), "+m" (l->a.counter)
17279+ : : "memory");
17280+ return i + __i;
17281+}
17282+
17283+/**
17284+ * local_add_return_unchecked - add and return
17285+ * @i: integer value to add
17286+ * @l: pointer to type local_unchecked_t
17287+ *
17288+ * Atomically adds @i to @l and returns @i + @l
17289+ */
17290+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17291+{
17292+ long __i = i;
17293 asm volatile(_ASM_XADD "%0, %1;"
17294 : "+r" (i), "+m" (l->a.counter)
17295 : : "memory");
17296@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17297
17298 #define local_cmpxchg(l, o, n) \
17299 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17300+#define local_cmpxchg_unchecked(l, o, n) \
17301+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17302 /* Always has a lock prefix */
17303 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17304
17305diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17306new file mode 100644
17307index 0000000..2bfd3ba
17308--- /dev/null
17309+++ b/arch/x86/include/asm/mman.h
17310@@ -0,0 +1,15 @@
17311+#ifndef _X86_MMAN_H
17312+#define _X86_MMAN_H
17313+
17314+#include <uapi/asm/mman.h>
17315+
17316+#ifdef __KERNEL__
17317+#ifndef __ASSEMBLY__
17318+#ifdef CONFIG_X86_32
17319+#define arch_mmap_check i386_mmap_check
17320+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17321+#endif
17322+#endif
17323+#endif
17324+
17325+#endif /* X86_MMAN_H */
17326diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17327index 09b9620..923aecd 100644
17328--- a/arch/x86/include/asm/mmu.h
17329+++ b/arch/x86/include/asm/mmu.h
17330@@ -9,7 +9,7 @@
17331 * we put the segment information here.
17332 */
17333 typedef struct {
17334- void *ldt;
17335+ struct desc_struct *ldt;
17336 int size;
17337
17338 #ifdef CONFIG_X86_64
17339@@ -18,7 +18,19 @@ typedef struct {
17340 #endif
17341
17342 struct mutex lock;
17343- void __user *vdso;
17344+ unsigned long vdso;
17345+
17346+#ifdef CONFIG_X86_32
17347+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17348+ unsigned long user_cs_base;
17349+ unsigned long user_cs_limit;
17350+
17351+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17352+ cpumask_t cpu_user_cs_mask;
17353+#endif
17354+
17355+#endif
17356+#endif
17357
17358 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
17359 } mm_context_t;
17360diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17361index 883f6b93..6869d96 100644
17362--- a/arch/x86/include/asm/mmu_context.h
17363+++ b/arch/x86/include/asm/mmu_context.h
17364@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
17365
17366 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17367 {
17368+
17369+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17370+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17371+ unsigned int i;
17372+ pgd_t *pgd;
17373+
17374+ pax_open_kernel();
17375+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17376+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17377+ set_pgd_batched(pgd+i, native_make_pgd(0));
17378+ pax_close_kernel();
17379+ }
17380+#endif
17381+
17382 #ifdef CONFIG_SMP
17383 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17384 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17385@@ -52,16 +66,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17386 struct task_struct *tsk)
17387 {
17388 unsigned cpu = smp_processor_id();
17389+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17390+ int tlbstate = TLBSTATE_OK;
17391+#endif
17392
17393 if (likely(prev != next)) {
17394 #ifdef CONFIG_SMP
17395+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17396+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17397+#endif
17398 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17399 this_cpu_write(cpu_tlbstate.active_mm, next);
17400 #endif
17401 cpumask_set_cpu(cpu, mm_cpumask(next));
17402
17403 /* Re-load page tables */
17404+#ifdef CONFIG_PAX_PER_CPU_PGD
17405+ pax_open_kernel();
17406+
17407+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17408+ if (static_cpu_has(X86_FEATURE_PCID))
17409+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17410+ else
17411+#endif
17412+
17413+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17414+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17415+ pax_close_kernel();
17416+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17417+
17418+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17419+ if (static_cpu_has(X86_FEATURE_PCID)) {
17420+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17421+ u64 descriptor[2];
17422+ descriptor[0] = PCID_USER;
17423+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17424+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17425+ descriptor[0] = PCID_KERNEL;
17426+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17427+ }
17428+ } else {
17429+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17430+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17431+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17432+ else
17433+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17434+ }
17435+ } else
17436+#endif
17437+
17438+ load_cr3(get_cpu_pgd(cpu, kernel));
17439+#else
17440 load_cr3(next->pgd);
17441+#endif
17442 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17443
17444 /* Stop flush ipis for the previous mm */
17445@@ -84,9 +141,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17446 */
17447 if (unlikely(prev->context.ldt != next->context.ldt))
17448 load_LDT_nolock(&next->context);
17449+
17450+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17451+ if (!(__supported_pte_mask & _PAGE_NX)) {
17452+ smp_mb__before_atomic();
17453+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17454+ smp_mb__after_atomic();
17455+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17456+ }
17457+#endif
17458+
17459+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17460+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17461+ prev->context.user_cs_limit != next->context.user_cs_limit))
17462+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17463+#ifdef CONFIG_SMP
17464+ else if (unlikely(tlbstate != TLBSTATE_OK))
17465+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17466+#endif
17467+#endif
17468+
17469 }
17470+ else {
17471+
17472+#ifdef CONFIG_PAX_PER_CPU_PGD
17473+ pax_open_kernel();
17474+
17475+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17476+ if (static_cpu_has(X86_FEATURE_PCID))
17477+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17478+ else
17479+#endif
17480+
17481+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17482+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17483+ pax_close_kernel();
17484+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17485+
17486+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17487+ if (static_cpu_has(X86_FEATURE_PCID)) {
17488+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17489+ u64 descriptor[2];
17490+ descriptor[0] = PCID_USER;
17491+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17492+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17493+ descriptor[0] = PCID_KERNEL;
17494+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17495+ }
17496+ } else {
17497+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17498+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17499+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17500+ else
17501+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17502+ }
17503+ } else
17504+#endif
17505+
17506+ load_cr3(get_cpu_pgd(cpu, kernel));
17507+#endif
17508+
17509 #ifdef CONFIG_SMP
17510- else {
17511 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17512 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17513
17514@@ -103,13 +218,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17515 * tlb flush IPI delivery. We must reload CR3
17516 * to make sure to use no freed page tables.
17517 */
17518+
17519+#ifndef CONFIG_PAX_PER_CPU_PGD
17520 load_cr3(next->pgd);
17521 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17522+#endif
17523+
17524 load_mm_cr4(next);
17525 load_LDT_nolock(&next->context);
17526+
17527+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17528+ if (!(__supported_pte_mask & _PAGE_NX))
17529+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17530+#endif
17531+
17532+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17533+#ifdef CONFIG_PAX_PAGEEXEC
17534+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17535+#endif
17536+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17537+#endif
17538+
17539 }
17540+#endif
17541 }
17542-#endif
17543 }
17544
17545 #define activate_mm(prev, next) \
17546diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17547index e3b7819..b257c64 100644
17548--- a/arch/x86/include/asm/module.h
17549+++ b/arch/x86/include/asm/module.h
17550@@ -5,6 +5,7 @@
17551
17552 #ifdef CONFIG_X86_64
17553 /* X86_64 does not define MODULE_PROC_FAMILY */
17554+#define MODULE_PROC_FAMILY ""
17555 #elif defined CONFIG_M486
17556 #define MODULE_PROC_FAMILY "486 "
17557 #elif defined CONFIG_M586
17558@@ -57,8 +58,20 @@
17559 #error unknown processor family
17560 #endif
17561
17562-#ifdef CONFIG_X86_32
17563-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17564+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17565+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17566+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17567+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17568+#else
17569+#define MODULE_PAX_KERNEXEC ""
17570 #endif
17571
17572+#ifdef CONFIG_PAX_MEMORY_UDEREF
17573+#define MODULE_PAX_UDEREF "UDEREF "
17574+#else
17575+#define MODULE_PAX_UDEREF ""
17576+#endif
17577+
17578+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17579+
17580 #endif /* _ASM_X86_MODULE_H */
17581diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17582index 5f2fc44..106caa6 100644
17583--- a/arch/x86/include/asm/nmi.h
17584+++ b/arch/x86/include/asm/nmi.h
17585@@ -36,26 +36,35 @@ enum {
17586
17587 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17588
17589+struct nmiaction;
17590+
17591+struct nmiwork {
17592+ const struct nmiaction *action;
17593+ u64 max_duration;
17594+ struct irq_work irq_work;
17595+};
17596+
17597 struct nmiaction {
17598 struct list_head list;
17599 nmi_handler_t handler;
17600- u64 max_duration;
17601- struct irq_work irq_work;
17602 unsigned long flags;
17603 const char *name;
17604-};
17605+ struct nmiwork *work;
17606+} __do_const;
17607
17608 #define register_nmi_handler(t, fn, fg, n, init...) \
17609 ({ \
17610- static struct nmiaction init fn##_na = { \
17611+ static struct nmiwork fn##_nw; \
17612+ static const struct nmiaction init fn##_na = { \
17613 .handler = (fn), \
17614 .name = (n), \
17615 .flags = (fg), \
17616+ .work = &fn##_nw, \
17617 }; \
17618 __register_nmi_handler((t), &fn##_na); \
17619 })
17620
17621-int __register_nmi_handler(unsigned int, struct nmiaction *);
17622+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17623
17624 void unregister_nmi_handler(unsigned int, const char *);
17625
17626diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17627index 802dde3..9183e68 100644
17628--- a/arch/x86/include/asm/page.h
17629+++ b/arch/x86/include/asm/page.h
17630@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17631 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17632
17633 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17634+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17635
17636 #define __boot_va(x) __va(x)
17637 #define __boot_pa(x) __pa(x)
17638@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17639 * virt_to_page(kaddr) returns a valid pointer if and only if
17640 * virt_addr_valid(kaddr) returns true.
17641 */
17642-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17643 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17644 extern bool __virt_addr_valid(unsigned long kaddr);
17645 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17646
17647+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17648+#define virt_to_page(kaddr) \
17649+ ({ \
17650+ const void *__kaddr = (const void *)(kaddr); \
17651+ BUG_ON(!virt_addr_valid(__kaddr)); \
17652+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17653+ })
17654+#else
17655+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17656+#endif
17657+
17658 #endif /* __ASSEMBLY__ */
17659
17660 #include <asm-generic/memory_model.h>
17661diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17662index b3bebf9..13ac22e 100644
17663--- a/arch/x86/include/asm/page_64.h
17664+++ b/arch/x86/include/asm/page_64.h
17665@@ -7,9 +7,9 @@
17666
17667 /* duplicated to the one in bootmem.h */
17668 extern unsigned long max_pfn;
17669-extern unsigned long phys_base;
17670+extern const unsigned long phys_base;
17671
17672-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17673+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17674 {
17675 unsigned long y = x - __START_KERNEL_map;
17676
17677@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17678 }
17679
17680 #ifdef CONFIG_DEBUG_VIRTUAL
17681-extern unsigned long __phys_addr(unsigned long);
17682-extern unsigned long __phys_addr_symbol(unsigned long);
17683+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17684+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17685 #else
17686 #define __phys_addr(x) __phys_addr_nodebug(x)
17687 #define __phys_addr_symbol(x) \
17688diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17689index 965c47d..ffe0af8 100644
17690--- a/arch/x86/include/asm/paravirt.h
17691+++ b/arch/x86/include/asm/paravirt.h
17692@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17693 return (pmd_t) { ret };
17694 }
17695
17696-static inline pmdval_t pmd_val(pmd_t pmd)
17697+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17698 {
17699 pmdval_t ret;
17700
17701@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17702 val);
17703 }
17704
17705+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17706+{
17707+ pgdval_t val = native_pgd_val(pgd);
17708+
17709+ if (sizeof(pgdval_t) > sizeof(long))
17710+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17711+ val, (u64)val >> 32);
17712+ else
17713+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17714+ val);
17715+}
17716+
17717 static inline void pgd_clear(pgd_t *pgdp)
17718 {
17719 set_pgd(pgdp, __pgd(0));
17720@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17721 pv_mmu_ops.set_fixmap(idx, phys, flags);
17722 }
17723
17724+#ifdef CONFIG_PAX_KERNEXEC
17725+static inline unsigned long pax_open_kernel(void)
17726+{
17727+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17728+}
17729+
17730+static inline unsigned long pax_close_kernel(void)
17731+{
17732+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17733+}
17734+#else
17735+static inline unsigned long pax_open_kernel(void) { return 0; }
17736+static inline unsigned long pax_close_kernel(void) { return 0; }
17737+#endif
17738+
17739 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17740
17741 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17742@@ -906,7 +933,7 @@ extern void default_banner(void);
17743
17744 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17745 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17746-#define PARA_INDIRECT(addr) *%cs:addr
17747+#define PARA_INDIRECT(addr) *%ss:addr
17748 #endif
17749
17750 #define INTERRUPT_RETURN \
17751@@ -981,6 +1008,21 @@ extern void default_banner(void);
17752 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17753 CLBR_NONE, \
17754 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17755+
17756+#define GET_CR0_INTO_RDI \
17757+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17758+ mov %rax,%rdi
17759+
17760+#define SET_RDI_INTO_CR0 \
17761+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17762+
17763+#define GET_CR3_INTO_RDI \
17764+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17765+ mov %rax,%rdi
17766+
17767+#define SET_RDI_INTO_CR3 \
17768+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17769+
17770 #endif /* CONFIG_X86_32 */
17771
17772 #endif /* __ASSEMBLY__ */
17773diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17774index 7549b8b..f0edfda 100644
17775--- a/arch/x86/include/asm/paravirt_types.h
17776+++ b/arch/x86/include/asm/paravirt_types.h
17777@@ -84,7 +84,7 @@ struct pv_init_ops {
17778 */
17779 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17780 unsigned long addr, unsigned len);
17781-};
17782+} __no_const __no_randomize_layout;
17783
17784
17785 struct pv_lazy_ops {
17786@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17787 void (*enter)(void);
17788 void (*leave)(void);
17789 void (*flush)(void);
17790-};
17791+} __no_randomize_layout;
17792
17793 struct pv_time_ops {
17794 unsigned long long (*sched_clock)(void);
17795 unsigned long long (*steal_clock)(int cpu);
17796 unsigned long (*get_tsc_khz)(void);
17797-};
17798+} __no_const __no_randomize_layout;
17799
17800 struct pv_cpu_ops {
17801 /* hooks for various privileged instructions */
17802@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17803
17804 void (*start_context_switch)(struct task_struct *prev);
17805 void (*end_context_switch)(struct task_struct *next);
17806-};
17807+} __no_const __no_randomize_layout;
17808
17809 struct pv_irq_ops {
17810 /*
17811@@ -215,7 +215,7 @@ struct pv_irq_ops {
17812 #ifdef CONFIG_X86_64
17813 void (*adjust_exception_frame)(void);
17814 #endif
17815-};
17816+} __no_randomize_layout;
17817
17818 struct pv_apic_ops {
17819 #ifdef CONFIG_X86_LOCAL_APIC
17820@@ -223,7 +223,7 @@ struct pv_apic_ops {
17821 unsigned long start_eip,
17822 unsigned long start_esp);
17823 #endif
17824-};
17825+} __no_const __no_randomize_layout;
17826
17827 struct pv_mmu_ops {
17828 unsigned long (*read_cr2)(void);
17829@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17830 struct paravirt_callee_save make_pud;
17831
17832 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17833+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17834 #endif /* PAGETABLE_LEVELS == 4 */
17835 #endif /* PAGETABLE_LEVELS >= 3 */
17836
17837@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17838 an mfn. We can tell which is which from the index. */
17839 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17840 phys_addr_t phys, pgprot_t flags);
17841-};
17842+
17843+#ifdef CONFIG_PAX_KERNEXEC
17844+ unsigned long (*pax_open_kernel)(void);
17845+ unsigned long (*pax_close_kernel)(void);
17846+#endif
17847+
17848+} __no_randomize_layout;
17849
17850 struct arch_spinlock;
17851 #ifdef CONFIG_SMP
17852@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17853 struct pv_lock_ops {
17854 struct paravirt_callee_save lock_spinning;
17855 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17856-};
17857+} __no_randomize_layout;
17858
17859 /* This contains all the paravirt structures: we get a convenient
17860 * number for each function using the offset which we use to indicate
17861- * what to patch. */
17862+ * what to patch.
17863+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17864+ */
17865+
17866 struct paravirt_patch_template {
17867 struct pv_init_ops pv_init_ops;
17868 struct pv_time_ops pv_time_ops;
17869@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17870 struct pv_apic_ops pv_apic_ops;
17871 struct pv_mmu_ops pv_mmu_ops;
17872 struct pv_lock_ops pv_lock_ops;
17873-};
17874+} __no_randomize_layout;
17875
17876 extern struct pv_info pv_info;
17877 extern struct pv_init_ops pv_init_ops;
17878diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17879index c4412e9..90e88c5 100644
17880--- a/arch/x86/include/asm/pgalloc.h
17881+++ b/arch/x86/include/asm/pgalloc.h
17882@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17883 pmd_t *pmd, pte_t *pte)
17884 {
17885 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17886+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17887+}
17888+
17889+static inline void pmd_populate_user(struct mm_struct *mm,
17890+ pmd_t *pmd, pte_t *pte)
17891+{
17892+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17893 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17894 }
17895
17896@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17897
17898 #ifdef CONFIG_X86_PAE
17899 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17900+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17901+{
17902+ pud_populate(mm, pudp, pmd);
17903+}
17904 #else /* !CONFIG_X86_PAE */
17905 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17906 {
17907 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17908 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17909 }
17910+
17911+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17912+{
17913+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17914+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17915+}
17916 #endif /* CONFIG_X86_PAE */
17917
17918 #if PAGETABLE_LEVELS > 3
17919@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17920 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17921 }
17922
17923+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17924+{
17925+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17926+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17927+}
17928+
17929 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17930 {
17931 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
17932diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17933index fd74a11..35fd5af 100644
17934--- a/arch/x86/include/asm/pgtable-2level.h
17935+++ b/arch/x86/include/asm/pgtable-2level.h
17936@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17937
17938 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17939 {
17940+ pax_open_kernel();
17941 *pmdp = pmd;
17942+ pax_close_kernel();
17943 }
17944
17945 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17946diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17947index cdaa58c..e61122b 100644
17948--- a/arch/x86/include/asm/pgtable-3level.h
17949+++ b/arch/x86/include/asm/pgtable-3level.h
17950@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17951
17952 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17953 {
17954+ pax_open_kernel();
17955 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17956+ pax_close_kernel();
17957 }
17958
17959 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17960 {
17961+ pax_open_kernel();
17962 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17963+ pax_close_kernel();
17964 }
17965
17966 /*
17967diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17968index a0c35bf..7045c6a 100644
17969--- a/arch/x86/include/asm/pgtable.h
17970+++ b/arch/x86/include/asm/pgtable.h
17971@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17972
17973 #ifndef __PAGETABLE_PUD_FOLDED
17974 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17975+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17976 #define pgd_clear(pgd) native_pgd_clear(pgd)
17977 #endif
17978
17979@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17980
17981 #define arch_end_context_switch(prev) do {} while(0)
17982
17983+#define pax_open_kernel() native_pax_open_kernel()
17984+#define pax_close_kernel() native_pax_close_kernel()
17985 #endif /* CONFIG_PARAVIRT */
17986
17987+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17988+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17989+
17990+#ifdef CONFIG_PAX_KERNEXEC
17991+static inline unsigned long native_pax_open_kernel(void)
17992+{
17993+ unsigned long cr0;
17994+
17995+ preempt_disable();
17996+ barrier();
17997+ cr0 = read_cr0() ^ X86_CR0_WP;
17998+ BUG_ON(cr0 & X86_CR0_WP);
17999+ write_cr0(cr0);
18000+ barrier();
18001+ return cr0 ^ X86_CR0_WP;
18002+}
18003+
18004+static inline unsigned long native_pax_close_kernel(void)
18005+{
18006+ unsigned long cr0;
18007+
18008+ barrier();
18009+ cr0 = read_cr0() ^ X86_CR0_WP;
18010+ BUG_ON(!(cr0 & X86_CR0_WP));
18011+ write_cr0(cr0);
18012+ barrier();
18013+ preempt_enable_no_resched();
18014+ return cr0 ^ X86_CR0_WP;
18015+}
18016+#else
18017+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18018+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18019+#endif
18020+
18021 /*
18022 * The following only work if pte_present() is true.
18023 * Undefined behaviour if not..
18024 */
18025+static inline int pte_user(pte_t pte)
18026+{
18027+ return pte_val(pte) & _PAGE_USER;
18028+}
18029+
18030 static inline int pte_dirty(pte_t pte)
18031 {
18032 return pte_flags(pte) & _PAGE_DIRTY;
18033@@ -150,6 +192,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18034 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18035 }
18036
18037+static inline unsigned long pgd_pfn(pgd_t pgd)
18038+{
18039+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18040+}
18041+
18042 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18043
18044 static inline int pmd_large(pmd_t pte)
18045@@ -203,9 +250,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18046 return pte_clear_flags(pte, _PAGE_RW);
18047 }
18048
18049+static inline pte_t pte_mkread(pte_t pte)
18050+{
18051+ return __pte(pte_val(pte) | _PAGE_USER);
18052+}
18053+
18054 static inline pte_t pte_mkexec(pte_t pte)
18055 {
18056- return pte_clear_flags(pte, _PAGE_NX);
18057+#ifdef CONFIG_X86_PAE
18058+ if (__supported_pte_mask & _PAGE_NX)
18059+ return pte_clear_flags(pte, _PAGE_NX);
18060+ else
18061+#endif
18062+ return pte_set_flags(pte, _PAGE_USER);
18063+}
18064+
18065+static inline pte_t pte_exprotect(pte_t pte)
18066+{
18067+#ifdef CONFIG_X86_PAE
18068+ if (__supported_pte_mask & _PAGE_NX)
18069+ return pte_set_flags(pte, _PAGE_NX);
18070+ else
18071+#endif
18072+ return pte_clear_flags(pte, _PAGE_USER);
18073 }
18074
18075 static inline pte_t pte_mkdirty(pte_t pte)
18076@@ -420,6 +487,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18077 #endif
18078
18079 #ifndef __ASSEMBLY__
18080+
18081+#ifdef CONFIG_PAX_PER_CPU_PGD
18082+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18083+enum cpu_pgd_type {kernel = 0, user = 1};
18084+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18085+{
18086+ return cpu_pgd[cpu][type];
18087+}
18088+#endif
18089+
18090 #include <linux/mm_types.h>
18091 #include <linux/mmdebug.h>
18092 #include <linux/log2.h>
18093@@ -571,7 +648,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18094 * Currently stuck as a macro due to indirect forward reference to
18095 * linux/mmzone.h's __section_mem_map_addr() definition:
18096 */
18097-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18098+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18099
18100 /* Find an entry in the second-level page table.. */
18101 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18102@@ -611,7 +688,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18103 * Currently stuck as a macro due to indirect forward reference to
18104 * linux/mmzone.h's __section_mem_map_addr() definition:
18105 */
18106-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18107+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18108
18109 /* to find an entry in a page-table-directory. */
18110 static inline unsigned long pud_index(unsigned long address)
18111@@ -626,7 +703,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18112
18113 static inline int pgd_bad(pgd_t pgd)
18114 {
18115- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18116+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18117 }
18118
18119 static inline int pgd_none(pgd_t pgd)
18120@@ -649,7 +726,12 @@ static inline int pgd_none(pgd_t pgd)
18121 * pgd_offset() returns a (pgd_t *)
18122 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18123 */
18124-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18125+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18126+
18127+#ifdef CONFIG_PAX_PER_CPU_PGD
18128+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18129+#endif
18130+
18131 /*
18132 * a shortcut which implies the use of the kernel's pgd, instead
18133 * of a process's
18134@@ -660,6 +742,25 @@ static inline int pgd_none(pgd_t pgd)
18135 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18136 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18137
18138+#ifdef CONFIG_X86_32
18139+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18140+#else
18141+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18142+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18143+
18144+#ifdef CONFIG_PAX_MEMORY_UDEREF
18145+#ifdef __ASSEMBLY__
18146+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18147+#else
18148+extern unsigned long pax_user_shadow_base;
18149+extern pgdval_t clone_pgd_mask;
18150+#endif
18151+#else
18152+#define pax_user_shadow_base (0UL)
18153+#endif
18154+
18155+#endif
18156+
18157 #ifndef __ASSEMBLY__
18158
18159 extern int direct_gbpages;
18160@@ -826,11 +927,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18161 * dst and src can be on the same page, but the range must not overlap,
18162 * and must not cross a page boundary.
18163 */
18164-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18165+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18166 {
18167- memcpy(dst, src, count * sizeof(pgd_t));
18168+ pax_open_kernel();
18169+ while (count--)
18170+ *dst++ = *src++;
18171+ pax_close_kernel();
18172 }
18173
18174+#ifdef CONFIG_PAX_PER_CPU_PGD
18175+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18176+#endif
18177+
18178+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18179+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18180+#else
18181+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18182+#endif
18183+
18184 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18185 static inline int page_level_shift(enum pg_level level)
18186 {
18187diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18188index b6c0b40..3535d47 100644
18189--- a/arch/x86/include/asm/pgtable_32.h
18190+++ b/arch/x86/include/asm/pgtable_32.h
18191@@ -25,9 +25,6 @@
18192 struct mm_struct;
18193 struct vm_area_struct;
18194
18195-extern pgd_t swapper_pg_dir[1024];
18196-extern pgd_t initial_page_table[1024];
18197-
18198 static inline void pgtable_cache_init(void) { }
18199 static inline void check_pgt_cache(void) { }
18200 void paging_init(void);
18201@@ -45,6 +42,12 @@ void paging_init(void);
18202 # include <asm/pgtable-2level.h>
18203 #endif
18204
18205+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18206+extern pgd_t initial_page_table[PTRS_PER_PGD];
18207+#ifdef CONFIG_X86_PAE
18208+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18209+#endif
18210+
18211 #if defined(CONFIG_HIGHPTE)
18212 #define pte_offset_map(dir, address) \
18213 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18214@@ -59,12 +62,17 @@ void paging_init(void);
18215 /* Clear a kernel PTE and flush it from the TLB */
18216 #define kpte_clear_flush(ptep, vaddr) \
18217 do { \
18218+ pax_open_kernel(); \
18219 pte_clear(&init_mm, (vaddr), (ptep)); \
18220+ pax_close_kernel(); \
18221 __flush_tlb_one((vaddr)); \
18222 } while (0)
18223
18224 #endif /* !__ASSEMBLY__ */
18225
18226+#define HAVE_ARCH_UNMAPPED_AREA
18227+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18228+
18229 /*
18230 * kern_addr_valid() is (1) for FLATMEM and (0) for
18231 * SPARSEMEM and DISCONTIGMEM
18232diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18233index 9fb2f2b..b04b4bf 100644
18234--- a/arch/x86/include/asm/pgtable_32_types.h
18235+++ b/arch/x86/include/asm/pgtable_32_types.h
18236@@ -8,7 +8,7 @@
18237 */
18238 #ifdef CONFIG_X86_PAE
18239 # include <asm/pgtable-3level_types.h>
18240-# define PMD_SIZE (1UL << PMD_SHIFT)
18241+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18242 # define PMD_MASK (~(PMD_SIZE - 1))
18243 #else
18244 # include <asm/pgtable-2level_types.h>
18245@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18246 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18247 #endif
18248
18249+#ifdef CONFIG_PAX_KERNEXEC
18250+#ifndef __ASSEMBLY__
18251+extern unsigned char MODULES_EXEC_VADDR[];
18252+extern unsigned char MODULES_EXEC_END[];
18253+#endif
18254+#include <asm/boot.h>
18255+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18256+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18257+#else
18258+#define ktla_ktva(addr) (addr)
18259+#define ktva_ktla(addr) (addr)
18260+#endif
18261+
18262 #define MODULES_VADDR VMALLOC_START
18263 #define MODULES_END VMALLOC_END
18264 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18265diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18266index 2ee7811..55aca24 100644
18267--- a/arch/x86/include/asm/pgtable_64.h
18268+++ b/arch/x86/include/asm/pgtable_64.h
18269@@ -16,11 +16,16 @@
18270
18271 extern pud_t level3_kernel_pgt[512];
18272 extern pud_t level3_ident_pgt[512];
18273+extern pud_t level3_vmalloc_start_pgt[512];
18274+extern pud_t level3_vmalloc_end_pgt[512];
18275+extern pud_t level3_vmemmap_pgt[512];
18276+extern pud_t level2_vmemmap_pgt[512];
18277 extern pmd_t level2_kernel_pgt[512];
18278 extern pmd_t level2_fixmap_pgt[512];
18279-extern pmd_t level2_ident_pgt[512];
18280-extern pte_t level1_fixmap_pgt[512];
18281-extern pgd_t init_level4_pgt[];
18282+extern pmd_t level2_ident_pgt[2][512];
18283+extern pte_t level1_fixmap_pgt[3][512];
18284+extern pte_t level1_vsyscall_pgt[512];
18285+extern pgd_t init_level4_pgt[512];
18286
18287 #define swapper_pg_dir init_level4_pgt
18288
18289@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18290
18291 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18292 {
18293+ pax_open_kernel();
18294 *pmdp = pmd;
18295+ pax_close_kernel();
18296 }
18297
18298 static inline void native_pmd_clear(pmd_t *pmd)
18299@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18300
18301 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18302 {
18303+ pax_open_kernel();
18304 *pudp = pud;
18305+ pax_close_kernel();
18306 }
18307
18308 static inline void native_pud_clear(pud_t *pud)
18309@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18310
18311 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18312 {
18313+ pax_open_kernel();
18314+ *pgdp = pgd;
18315+ pax_close_kernel();
18316+}
18317+
18318+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18319+{
18320 *pgdp = pgd;
18321 }
18322
18323diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18324index 602b602..acb53ed 100644
18325--- a/arch/x86/include/asm/pgtable_64_types.h
18326+++ b/arch/x86/include/asm/pgtable_64_types.h
18327@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18328 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18329 #define MODULES_END _AC(0xffffffffff000000, UL)
18330 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18331+#define MODULES_EXEC_VADDR MODULES_VADDR
18332+#define MODULES_EXEC_END MODULES_END
18333 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18334 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18335 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18336 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18337
18338+#define ktla_ktva(addr) (addr)
18339+#define ktva_ktla(addr) (addr)
18340+
18341 #define EARLY_DYNAMIC_PAGE_TABLES 64
18342
18343 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18344diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18345index 8c7c108..1c1b77f 100644
18346--- a/arch/x86/include/asm/pgtable_types.h
18347+++ b/arch/x86/include/asm/pgtable_types.h
18348@@ -85,8 +85,10 @@
18349
18350 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18351 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18352-#else
18353+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18354 #define _PAGE_NX (_AT(pteval_t, 0))
18355+#else
18356+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18357 #endif
18358
18359 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
18360@@ -141,6 +143,9 @@ enum page_cache_mode {
18361 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18362 _PAGE_ACCESSED)
18363
18364+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18365+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18366+
18367 #define __PAGE_KERNEL_EXEC \
18368 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18369 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18370@@ -148,7 +153,7 @@ enum page_cache_mode {
18371 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18372 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18373 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18374-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18375+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18376 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18377 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18378 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18379@@ -194,7 +199,7 @@ enum page_cache_mode {
18380 #ifdef CONFIG_X86_64
18381 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18382 #else
18383-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18384+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18385 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18386 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18387 #endif
18388@@ -233,7 +238,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18389 {
18390 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18391 }
18392+#endif
18393
18394+#if PAGETABLE_LEVELS == 3
18395+#include <asm-generic/pgtable-nopud.h>
18396+#endif
18397+
18398+#if PAGETABLE_LEVELS == 2
18399+#include <asm-generic/pgtable-nopmd.h>
18400+#endif
18401+
18402+#ifndef __ASSEMBLY__
18403 #if PAGETABLE_LEVELS > 3
18404 typedef struct { pudval_t pud; } pud_t;
18405
18406@@ -247,8 +262,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18407 return pud.pud;
18408 }
18409 #else
18410-#include <asm-generic/pgtable-nopud.h>
18411-
18412 static inline pudval_t native_pud_val(pud_t pud)
18413 {
18414 return native_pgd_val(pud.pgd);
18415@@ -268,8 +281,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18416 return pmd.pmd;
18417 }
18418 #else
18419-#include <asm-generic/pgtable-nopmd.h>
18420-
18421 static inline pmdval_t native_pmd_val(pmd_t pmd)
18422 {
18423 return native_pgd_val(pmd.pud.pgd);
18424@@ -362,7 +373,6 @@ typedef struct page *pgtable_t;
18425
18426 extern pteval_t __supported_pte_mask;
18427 extern void set_nx(void);
18428-extern int nx_enabled;
18429
18430 #define pgprot_writecombine pgprot_writecombine
18431 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18432diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18433index 8f327184..368fb29 100644
18434--- a/arch/x86/include/asm/preempt.h
18435+++ b/arch/x86/include/asm/preempt.h
18436@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18437 */
18438 static __always_inline bool __preempt_count_dec_and_test(void)
18439 {
18440- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18441+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18442 }
18443
18444 /*
18445diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18446index ec1c935..5cc6023 100644
18447--- a/arch/x86/include/asm/processor.h
18448+++ b/arch/x86/include/asm/processor.h
18449@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18450 /* Index into per_cpu list: */
18451 u16 cpu_index;
18452 u32 microcode;
18453-};
18454+} __randomize_layout;
18455
18456 #define X86_VENDOR_INTEL 0
18457 #define X86_VENDOR_CYRIX 1
18458@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18459 : "memory");
18460 }
18461
18462+/* invpcid (%rdx),%rax */
18463+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18464+
18465+#define INVPCID_SINGLE_ADDRESS 0UL
18466+#define INVPCID_SINGLE_CONTEXT 1UL
18467+#define INVPCID_ALL_GLOBAL 2UL
18468+#define INVPCID_ALL_NONGLOBAL 3UL
18469+
18470+#define PCID_KERNEL 0UL
18471+#define PCID_USER 1UL
18472+#define PCID_NOFLUSH (1UL << 63)
18473+
18474 static inline void load_cr3(pgd_t *pgdir)
18475 {
18476- write_cr3(__pa(pgdir));
18477+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18478 }
18479
18480 #ifdef CONFIG_X86_32
18481@@ -282,7 +294,7 @@ struct tss_struct {
18482
18483 } ____cacheline_aligned;
18484
18485-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18486+extern struct tss_struct init_tss[NR_CPUS];
18487
18488 /*
18489 * Save the original ist values for checking stack pointers during debugging
18490@@ -479,6 +491,7 @@ struct thread_struct {
18491 unsigned short ds;
18492 unsigned short fsindex;
18493 unsigned short gsindex;
18494+ unsigned short ss;
18495 #endif
18496 #ifdef CONFIG_X86_32
18497 unsigned long ip;
18498@@ -805,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
18499 */
18500 #define TASK_SIZE PAGE_OFFSET
18501 #define TASK_SIZE_MAX TASK_SIZE
18502+
18503+#ifdef CONFIG_PAX_SEGMEXEC
18504+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18505+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18506+#else
18507 #define STACK_TOP TASK_SIZE
18508-#define STACK_TOP_MAX STACK_TOP
18509+#endif
18510+
18511+#define STACK_TOP_MAX TASK_SIZE
18512
18513 #define INIT_THREAD { \
18514- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18515+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18516 .vm86_info = NULL, \
18517 .sysenter_cs = __KERNEL_CS, \
18518 .io_bitmap_ptr = NULL, \
18519@@ -823,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
18520 */
18521 #define INIT_TSS { \
18522 .x86_tss = { \
18523- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18524+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18525 .ss0 = __KERNEL_DS, \
18526 .ss1 = __KERNEL_CS, \
18527 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18528@@ -834,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
18529 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18530
18531 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18532-#define KSTK_TOP(info) \
18533-({ \
18534- unsigned long *__ptr = (unsigned long *)(info); \
18535- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18536-})
18537+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18538
18539 /*
18540 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18541@@ -853,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18542 #define task_pt_regs(task) \
18543 ({ \
18544 struct pt_regs *__regs__; \
18545- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18546+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18547 __regs__ - 1; \
18548 })
18549
18550@@ -869,13 +885,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18551 * particular problem by preventing anything from being mapped
18552 * at the maximum canonical address.
18553 */
18554-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18555+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18556
18557 /* This decides where the kernel will search for a free chunk of vm
18558 * space during mmap's.
18559 */
18560 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18561- 0xc0000000 : 0xFFFFe000)
18562+ 0xc0000000 : 0xFFFFf000)
18563
18564 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18565 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18566@@ -886,11 +902,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18567 #define STACK_TOP_MAX TASK_SIZE_MAX
18568
18569 #define INIT_THREAD { \
18570- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18571+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18572 }
18573
18574 #define INIT_TSS { \
18575- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18576+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18577 }
18578
18579 /*
18580@@ -918,6 +934,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18581 */
18582 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18583
18584+#ifdef CONFIG_PAX_SEGMEXEC
18585+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18586+#endif
18587+
18588 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18589
18590 /* Get/set a process' ability to use the timestamp counter instruction */
18591@@ -962,7 +982,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18592 return 0;
18593 }
18594
18595-extern unsigned long arch_align_stack(unsigned long sp);
18596+#define arch_align_stack(x) ((x) & ~0xfUL)
18597 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18598
18599 void default_idle(void);
18600@@ -972,6 +992,6 @@ bool xen_set_default_idle(void);
18601 #define xen_set_default_idle 0
18602 #endif
18603
18604-void stop_this_cpu(void *dummy);
18605+void stop_this_cpu(void *dummy) __noreturn;
18606 void df_debug(struct pt_regs *regs, long error_code);
18607 #endif /* _ASM_X86_PROCESSOR_H */
18608diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18609index 86fc2bb..bd5049a 100644
18610--- a/arch/x86/include/asm/ptrace.h
18611+++ b/arch/x86/include/asm/ptrace.h
18612@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18613 }
18614
18615 /*
18616- * user_mode_vm(regs) determines whether a register set came from user mode.
18617+ * user_mode(regs) determines whether a register set came from user mode.
18618 * This is true if V8086 mode was enabled OR if the register set was from
18619 * protected mode with RPL-3 CS value. This tricky test checks that with
18620 * one comparison. Many places in the kernel can bypass this full check
18621- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18622+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18623+ * be used.
18624 */
18625-static inline int user_mode(struct pt_regs *regs)
18626+static inline int user_mode_novm(struct pt_regs *regs)
18627 {
18628 #ifdef CONFIG_X86_32
18629 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18630 #else
18631- return !!(regs->cs & 3);
18632+ return !!(regs->cs & SEGMENT_RPL_MASK);
18633 #endif
18634 }
18635
18636-static inline int user_mode_vm(struct pt_regs *regs)
18637+static inline int user_mode(struct pt_regs *regs)
18638 {
18639 #ifdef CONFIG_X86_32
18640 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18641 USER_RPL;
18642 #else
18643- return user_mode(regs);
18644+ return user_mode_novm(regs);
18645 #endif
18646 }
18647
18648@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18649 #ifdef CONFIG_X86_64
18650 static inline bool user_64bit_mode(struct pt_regs *regs)
18651 {
18652+ unsigned long cs = regs->cs & 0xffff;
18653 #ifndef CONFIG_PARAVIRT
18654 /*
18655 * On non-paravirt systems, this is the only long mode CPL 3
18656 * selector. We do not allow long mode selectors in the LDT.
18657 */
18658- return regs->cs == __USER_CS;
18659+ return cs == __USER_CS;
18660 #else
18661 /* Headers are too twisted for this to go in paravirt.h. */
18662- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18663+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18664 #endif
18665 }
18666
18667@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18668 * Traps from the kernel do not save sp and ss.
18669 * Use the helper function to retrieve sp.
18670 */
18671- if (offset == offsetof(struct pt_regs, sp) &&
18672- regs->cs == __KERNEL_CS)
18673- return kernel_stack_pointer(regs);
18674+ if (offset == offsetof(struct pt_regs, sp)) {
18675+ unsigned long cs = regs->cs & 0xffff;
18676+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18677+ return kernel_stack_pointer(regs);
18678+ }
18679 #endif
18680 return *(unsigned long *)((unsigned long)regs + offset);
18681 }
18682diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18683index ae0e241..e80b10b 100644
18684--- a/arch/x86/include/asm/qrwlock.h
18685+++ b/arch/x86/include/asm/qrwlock.h
18686@@ -7,8 +7,8 @@
18687 #define queue_write_unlock queue_write_unlock
18688 static inline void queue_write_unlock(struct qrwlock *lock)
18689 {
18690- barrier();
18691- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18692+ barrier();
18693+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18694 }
18695 #endif
18696
18697diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18698index 9c6b890..5305f53 100644
18699--- a/arch/x86/include/asm/realmode.h
18700+++ b/arch/x86/include/asm/realmode.h
18701@@ -22,16 +22,14 @@ struct real_mode_header {
18702 #endif
18703 /* APM/BIOS reboot */
18704 u32 machine_real_restart_asm;
18705-#ifdef CONFIG_X86_64
18706 u32 machine_real_restart_seg;
18707-#endif
18708 };
18709
18710 /* This must match data at trampoline_32/64.S */
18711 struct trampoline_header {
18712 #ifdef CONFIG_X86_32
18713 u32 start;
18714- u16 gdt_pad;
18715+ u16 boot_cs;
18716 u16 gdt_limit;
18717 u32 gdt_base;
18718 #else
18719diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18720index a82c4f1..ac45053 100644
18721--- a/arch/x86/include/asm/reboot.h
18722+++ b/arch/x86/include/asm/reboot.h
18723@@ -6,13 +6,13 @@
18724 struct pt_regs;
18725
18726 struct machine_ops {
18727- void (*restart)(char *cmd);
18728- void (*halt)(void);
18729- void (*power_off)(void);
18730+ void (* __noreturn restart)(char *cmd);
18731+ void (* __noreturn halt)(void);
18732+ void (* __noreturn power_off)(void);
18733 void (*shutdown)(void);
18734 void (*crash_shutdown)(struct pt_regs *);
18735- void (*emergency_restart)(void);
18736-};
18737+ void (* __noreturn emergency_restart)(void);
18738+} __no_const;
18739
18740 extern struct machine_ops machine_ops;
18741
18742diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18743index 8f7866a..e442f20 100644
18744--- a/arch/x86/include/asm/rmwcc.h
18745+++ b/arch/x86/include/asm/rmwcc.h
18746@@ -3,7 +3,34 @@
18747
18748 #ifdef CC_HAVE_ASM_GOTO
18749
18750-#define __GEN_RMWcc(fullop, var, cc, ...) \
18751+#ifdef CONFIG_PAX_REFCOUNT
18752+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18753+do { \
18754+ asm_volatile_goto (fullop \
18755+ ";jno 0f\n" \
18756+ fullantiop \
18757+ ";int $4\n0:\n" \
18758+ _ASM_EXTABLE(0b, 0b) \
18759+ ";j" cc " %l[cc_label]" \
18760+ : : "m" (var), ## __VA_ARGS__ \
18761+ : "memory" : cc_label); \
18762+ return 0; \
18763+cc_label: \
18764+ return 1; \
18765+} while (0)
18766+#else
18767+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18768+do { \
18769+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18770+ : : "m" (var), ## __VA_ARGS__ \
18771+ : "memory" : cc_label); \
18772+ return 0; \
18773+cc_label: \
18774+ return 1; \
18775+} while (0)
18776+#endif
18777+
18778+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18779 do { \
18780 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18781 : : "m" (var), ## __VA_ARGS__ \
18782@@ -13,15 +40,46 @@ cc_label: \
18783 return 1; \
18784 } while (0)
18785
18786-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18787- __GEN_RMWcc(op " " arg0, var, cc)
18788+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18789+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18790
18791-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18792- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18793+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18794+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18795+
18796+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18797+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18798+
18799+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18800+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18801
18802 #else /* !CC_HAVE_ASM_GOTO */
18803
18804-#define __GEN_RMWcc(fullop, var, cc, ...) \
18805+#ifdef CONFIG_PAX_REFCOUNT
18806+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18807+do { \
18808+ char c; \
18809+ asm volatile (fullop \
18810+ ";jno 0f\n" \
18811+ fullantiop \
18812+ ";int $4\n0:\n" \
18813+ _ASM_EXTABLE(0b, 0b) \
18814+ "; set" cc " %1" \
18815+ : "+m" (var), "=qm" (c) \
18816+ : __VA_ARGS__ : "memory"); \
18817+ return c != 0; \
18818+} while (0)
18819+#else
18820+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18821+do { \
18822+ char c; \
18823+ asm volatile (fullop "; set" cc " %1" \
18824+ : "+m" (var), "=qm" (c) \
18825+ : __VA_ARGS__ : "memory"); \
18826+ return c != 0; \
18827+} while (0)
18828+#endif
18829+
18830+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18831 do { \
18832 char c; \
18833 asm volatile (fullop "; set" cc " %1" \
18834@@ -30,11 +88,17 @@ do { \
18835 return c != 0; \
18836 } while (0)
18837
18838-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18839- __GEN_RMWcc(op " " arg0, var, cc)
18840+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18841+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18842+
18843+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18844+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18845+
18846+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18847+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18848
18849-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18850- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18851+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18852+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18853
18854 #endif /* CC_HAVE_ASM_GOTO */
18855
18856diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18857index cad82c9..2e5c5c1 100644
18858--- a/arch/x86/include/asm/rwsem.h
18859+++ b/arch/x86/include/asm/rwsem.h
18860@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18861 {
18862 asm volatile("# beginning down_read\n\t"
18863 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18864+
18865+#ifdef CONFIG_PAX_REFCOUNT
18866+ "jno 0f\n"
18867+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18868+ "int $4\n0:\n"
18869+ _ASM_EXTABLE(0b, 0b)
18870+#endif
18871+
18872 /* adds 0x00000001 */
18873 " jns 1f\n"
18874 " call call_rwsem_down_read_failed\n"
18875@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18876 "1:\n\t"
18877 " mov %1,%2\n\t"
18878 " add %3,%2\n\t"
18879+
18880+#ifdef CONFIG_PAX_REFCOUNT
18881+ "jno 0f\n"
18882+ "sub %3,%2\n"
18883+ "int $4\n0:\n"
18884+ _ASM_EXTABLE(0b, 0b)
18885+#endif
18886+
18887 " jle 2f\n\t"
18888 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18889 " jnz 1b\n\t"
18890@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18891 long tmp;
18892 asm volatile("# beginning down_write\n\t"
18893 LOCK_PREFIX " xadd %1,(%2)\n\t"
18894+
18895+#ifdef CONFIG_PAX_REFCOUNT
18896+ "jno 0f\n"
18897+ "mov %1,(%2)\n"
18898+ "int $4\n0:\n"
18899+ _ASM_EXTABLE(0b, 0b)
18900+#endif
18901+
18902 /* adds 0xffff0001, returns the old value */
18903 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18904 /* was the active mask 0 before? */
18905@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18906 long tmp;
18907 asm volatile("# beginning __up_read\n\t"
18908 LOCK_PREFIX " xadd %1,(%2)\n\t"
18909+
18910+#ifdef CONFIG_PAX_REFCOUNT
18911+ "jno 0f\n"
18912+ "mov %1,(%2)\n"
18913+ "int $4\n0:\n"
18914+ _ASM_EXTABLE(0b, 0b)
18915+#endif
18916+
18917 /* subtracts 1, returns the old value */
18918 " jns 1f\n\t"
18919 " call call_rwsem_wake\n" /* expects old value in %edx */
18920@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18921 long tmp;
18922 asm volatile("# beginning __up_write\n\t"
18923 LOCK_PREFIX " xadd %1,(%2)\n\t"
18924+
18925+#ifdef CONFIG_PAX_REFCOUNT
18926+ "jno 0f\n"
18927+ "mov %1,(%2)\n"
18928+ "int $4\n0:\n"
18929+ _ASM_EXTABLE(0b, 0b)
18930+#endif
18931+
18932 /* subtracts 0xffff0001, returns the old value */
18933 " jns 1f\n\t"
18934 " call call_rwsem_wake\n" /* expects old value in %edx */
18935@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18936 {
18937 asm volatile("# beginning __downgrade_write\n\t"
18938 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18939+
18940+#ifdef CONFIG_PAX_REFCOUNT
18941+ "jno 0f\n"
18942+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18943+ "int $4\n0:\n"
18944+ _ASM_EXTABLE(0b, 0b)
18945+#endif
18946+
18947 /*
18948 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18949 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18950@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18951 */
18952 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18953 {
18954- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18955+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18956+
18957+#ifdef CONFIG_PAX_REFCOUNT
18958+ "jno 0f\n"
18959+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18960+ "int $4\n0:\n"
18961+ _ASM_EXTABLE(0b, 0b)
18962+#endif
18963+
18964 : "+m" (sem->count)
18965 : "er" (delta));
18966 }
18967@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18968 */
18969 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18970 {
18971- return delta + xadd(&sem->count, delta);
18972+ return delta + xadd_check_overflow(&sem->count, delta);
18973 }
18974
18975 #endif /* __KERNEL__ */
18976diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18977index db257a5..b91bc77 100644
18978--- a/arch/x86/include/asm/segment.h
18979+++ b/arch/x86/include/asm/segment.h
18980@@ -73,10 +73,15 @@
18981 * 26 - ESPFIX small SS
18982 * 27 - per-cpu [ offset to per-cpu data area ]
18983 * 28 - stack_canary-20 [ for stack protector ]
18984- * 29 - unused
18985- * 30 - unused
18986+ * 29 - PCI BIOS CS
18987+ * 30 - PCI BIOS DS
18988 * 31 - TSS for double fault handler
18989 */
18990+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18991+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18992+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18993+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18994+
18995 #define GDT_ENTRY_TLS_MIN 6
18996 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18997
18998@@ -88,6 +93,8 @@
18999
19000 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19001
19002+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19003+
19004 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19005
19006 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19007@@ -113,6 +120,12 @@
19008 #define __KERNEL_STACK_CANARY 0
19009 #endif
19010
19011+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19012+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19013+
19014+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19015+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19016+
19017 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19018
19019 /*
19020@@ -140,7 +153,7 @@
19021 */
19022
19023 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19024-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19025+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19026
19027
19028 #else
19029@@ -164,6 +177,8 @@
19030 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19031 #define __USER32_DS __USER_DS
19032
19033+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19034+
19035 #define GDT_ENTRY_TSS 8 /* needs two entries */
19036 #define GDT_ENTRY_LDT 10 /* needs two entries */
19037 #define GDT_ENTRY_TLS_MIN 12
19038@@ -172,6 +187,8 @@
19039 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19040 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19041
19042+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19043+
19044 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19045 #define FS_TLS 0
19046 #define GS_TLS 1
19047@@ -179,12 +196,14 @@
19048 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19049 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19050
19051-#define GDT_ENTRIES 16
19052+#define GDT_ENTRIES 17
19053
19054 #endif
19055
19056 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19057+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19058 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19059+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19060 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19061 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19062 #ifndef CONFIG_PARAVIRT
19063@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19064 {
19065 unsigned long __limit;
19066 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19067- return __limit + 1;
19068+ return __limit;
19069 }
19070
19071 #endif /* !__ASSEMBLY__ */
19072diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19073index 8d3120f..352b440 100644
19074--- a/arch/x86/include/asm/smap.h
19075+++ b/arch/x86/include/asm/smap.h
19076@@ -25,11 +25,40 @@
19077
19078 #include <asm/alternative-asm.h>
19079
19080+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19081+#define ASM_PAX_OPEN_USERLAND \
19082+ 661: jmp 663f; \
19083+ .pushsection .altinstr_replacement, "a" ; \
19084+ 662: pushq %rax; nop; \
19085+ .popsection ; \
19086+ .pushsection .altinstructions, "a" ; \
19087+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19088+ .popsection ; \
19089+ call __pax_open_userland; \
19090+ popq %rax; \
19091+ 663:
19092+
19093+#define ASM_PAX_CLOSE_USERLAND \
19094+ 661: jmp 663f; \
19095+ .pushsection .altinstr_replacement, "a" ; \
19096+ 662: pushq %rax; nop; \
19097+ .popsection; \
19098+ .pushsection .altinstructions, "a" ; \
19099+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19100+ .popsection; \
19101+ call __pax_close_userland; \
19102+ popq %rax; \
19103+ 663:
19104+#else
19105+#define ASM_PAX_OPEN_USERLAND
19106+#define ASM_PAX_CLOSE_USERLAND
19107+#endif
19108+
19109 #ifdef CONFIG_X86_SMAP
19110
19111 #define ASM_CLAC \
19112 661: ASM_NOP3 ; \
19113- .pushsection .altinstr_replacement, "ax" ; \
19114+ .pushsection .altinstr_replacement, "a" ; \
19115 662: __ASM_CLAC ; \
19116 .popsection ; \
19117 .pushsection .altinstructions, "a" ; \
19118@@ -38,7 +67,7 @@
19119
19120 #define ASM_STAC \
19121 661: ASM_NOP3 ; \
19122- .pushsection .altinstr_replacement, "ax" ; \
19123+ .pushsection .altinstr_replacement, "a" ; \
19124 662: __ASM_STAC ; \
19125 .popsection ; \
19126 .pushsection .altinstructions, "a" ; \
19127@@ -56,6 +85,37 @@
19128
19129 #include <asm/alternative.h>
19130
19131+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19132+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19133+
19134+extern void __pax_open_userland(void);
19135+static __always_inline unsigned long pax_open_userland(void)
19136+{
19137+
19138+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19139+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19140+ :
19141+ : [open] "i" (__pax_open_userland)
19142+ : "memory", "rax");
19143+#endif
19144+
19145+ return 0;
19146+}
19147+
19148+extern void __pax_close_userland(void);
19149+static __always_inline unsigned long pax_close_userland(void)
19150+{
19151+
19152+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19153+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19154+ :
19155+ : [close] "i" (__pax_close_userland)
19156+ : "memory", "rax");
19157+#endif
19158+
19159+ return 0;
19160+}
19161+
19162 #ifdef CONFIG_X86_SMAP
19163
19164 static __always_inline void clac(void)
19165diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19166index 8cd1cc3..827e09e 100644
19167--- a/arch/x86/include/asm/smp.h
19168+++ b/arch/x86/include/asm/smp.h
19169@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19170 /* cpus sharing the last level cache: */
19171 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19172 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19173-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19174+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19175
19176 static inline struct cpumask *cpu_sibling_mask(int cpu)
19177 {
19178@@ -78,7 +78,7 @@ struct smp_ops {
19179
19180 void (*send_call_func_ipi)(const struct cpumask *mask);
19181 void (*send_call_func_single_ipi)(int cpu);
19182-};
19183+} __no_const;
19184
19185 /* Globals due to paravirt */
19186 extern void set_cpu_sibling_map(int cpu);
19187@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19188 extern int safe_smp_processor_id(void);
19189
19190 #elif defined(CONFIG_X86_64_SMP)
19191-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19192-
19193-#define stack_smp_processor_id() \
19194-({ \
19195- struct thread_info *ti; \
19196- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19197- ti->cpu; \
19198-})
19199+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19200+#define stack_smp_processor_id() raw_smp_processor_id()
19201 #define safe_smp_processor_id() smp_processor_id()
19202
19203 #endif
19204diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19205index 6a99859..03cb807 100644
19206--- a/arch/x86/include/asm/stackprotector.h
19207+++ b/arch/x86/include/asm/stackprotector.h
19208@@ -47,7 +47,7 @@
19209 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19210 */
19211 #define GDT_STACK_CANARY_INIT \
19212- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19213+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19214
19215 /*
19216 * Initialize the stackprotector canary value.
19217@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19218
19219 static inline void load_stack_canary_segment(void)
19220 {
19221-#ifdef CONFIG_X86_32
19222+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19223 asm volatile ("mov %0, %%gs" : : "r" (0));
19224 #endif
19225 }
19226diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19227index 70bbe39..4ae2bd4 100644
19228--- a/arch/x86/include/asm/stacktrace.h
19229+++ b/arch/x86/include/asm/stacktrace.h
19230@@ -11,28 +11,20 @@
19231
19232 extern int kstack_depth_to_print;
19233
19234-struct thread_info;
19235+struct task_struct;
19236 struct stacktrace_ops;
19237
19238-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19239- unsigned long *stack,
19240- unsigned long bp,
19241- const struct stacktrace_ops *ops,
19242- void *data,
19243- unsigned long *end,
19244- int *graph);
19245+typedef unsigned long walk_stack_t(struct task_struct *task,
19246+ void *stack_start,
19247+ unsigned long *stack,
19248+ unsigned long bp,
19249+ const struct stacktrace_ops *ops,
19250+ void *data,
19251+ unsigned long *end,
19252+ int *graph);
19253
19254-extern unsigned long
19255-print_context_stack(struct thread_info *tinfo,
19256- unsigned long *stack, unsigned long bp,
19257- const struct stacktrace_ops *ops, void *data,
19258- unsigned long *end, int *graph);
19259-
19260-extern unsigned long
19261-print_context_stack_bp(struct thread_info *tinfo,
19262- unsigned long *stack, unsigned long bp,
19263- const struct stacktrace_ops *ops, void *data,
19264- unsigned long *end, int *graph);
19265+extern walk_stack_t print_context_stack;
19266+extern walk_stack_t print_context_stack_bp;
19267
19268 /* Generic stack tracer with callbacks */
19269
19270@@ -40,7 +32,7 @@ struct stacktrace_ops {
19271 void (*address)(void *data, unsigned long address, int reliable);
19272 /* On negative return stop dumping */
19273 int (*stack)(void *data, char *name);
19274- walk_stack_t walk_stack;
19275+ walk_stack_t *walk_stack;
19276 };
19277
19278 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19279diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19280index 751bf4b..a1278b5 100644
19281--- a/arch/x86/include/asm/switch_to.h
19282+++ b/arch/x86/include/asm/switch_to.h
19283@@ -112,7 +112,7 @@ do { \
19284 "call __switch_to\n\t" \
19285 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19286 __switch_canary \
19287- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19288+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19289 "movq %%rax,%%rdi\n\t" \
19290 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19291 "jnz ret_from_fork\n\t" \
19292@@ -123,7 +123,7 @@ do { \
19293 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19294 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19295 [_tif_fork] "i" (_TIF_FORK), \
19296- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19297+ [thread_info] "m" (current_tinfo), \
19298 [current_task] "m" (current_task) \
19299 __switch_canary_iparam \
19300 : "memory", "cc" __EXTRA_CLOBBER)
19301diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19302index 1d4e4f2..506db18 100644
19303--- a/arch/x86/include/asm/thread_info.h
19304+++ b/arch/x86/include/asm/thread_info.h
19305@@ -24,7 +24,6 @@ struct exec_domain;
19306 #include <linux/atomic.h>
19307
19308 struct thread_info {
19309- struct task_struct *task; /* main task structure */
19310 struct exec_domain *exec_domain; /* execution domain */
19311 __u32 flags; /* low level flags */
19312 __u32 status; /* thread synchronous flags */
19313@@ -32,13 +31,13 @@ struct thread_info {
19314 int saved_preempt_count;
19315 mm_segment_t addr_limit;
19316 void __user *sysenter_return;
19317+ unsigned long lowest_stack;
19318 unsigned int sig_on_uaccess_error:1;
19319 unsigned int uaccess_err:1; /* uaccess failed */
19320 };
19321
19322-#define INIT_THREAD_INFO(tsk) \
19323+#define INIT_THREAD_INFO \
19324 { \
19325- .task = &tsk, \
19326 .exec_domain = &default_exec_domain, \
19327 .flags = 0, \
19328 .cpu = 0, \
19329@@ -46,7 +45,7 @@ struct thread_info {
19330 .addr_limit = KERNEL_DS, \
19331 }
19332
19333-#define init_thread_info (init_thread_union.thread_info)
19334+#define init_thread_info (init_thread_union.stack)
19335 #define init_stack (init_thread_union.stack)
19336
19337 #else /* !__ASSEMBLY__ */
19338@@ -86,6 +85,7 @@ struct thread_info {
19339 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19340 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19341 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19342+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19343
19344 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19345 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19346@@ -109,17 +109,18 @@ struct thread_info {
19347 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19348 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19349 #define _TIF_X32 (1 << TIF_X32)
19350+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19351
19352 /* work to do in syscall_trace_enter() */
19353 #define _TIF_WORK_SYSCALL_ENTRY \
19354 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19355 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19356- _TIF_NOHZ)
19357+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19358
19359 /* work to do in syscall_trace_leave() */
19360 #define _TIF_WORK_SYSCALL_EXIT \
19361 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19362- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19363+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19364
19365 /* work to do on interrupt/exception return */
19366 #define _TIF_WORK_MASK \
19367@@ -130,7 +131,7 @@ struct thread_info {
19368 /* work to do on any return to user space */
19369 #define _TIF_ALLWORK_MASK \
19370 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19371- _TIF_NOHZ)
19372+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19373
19374 /* Only used for 64 bit */
19375 #define _TIF_DO_NOTIFY_MASK \
19376@@ -145,7 +146,6 @@ struct thread_info {
19377 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19378
19379 #define STACK_WARN (THREAD_SIZE/8)
19380-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19381
19382 /*
19383 * macros/functions for gaining access to the thread information structure
19384@@ -156,12 +156,11 @@ struct thread_info {
19385
19386 DECLARE_PER_CPU(unsigned long, kernel_stack);
19387
19388+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19389+
19390 static inline struct thread_info *current_thread_info(void)
19391 {
19392- struct thread_info *ti;
19393- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19394- KERNEL_STACK_OFFSET - THREAD_SIZE);
19395- return ti;
19396+ return this_cpu_read_stable(current_tinfo);
19397 }
19398
19399 static inline unsigned long current_stack_pointer(void)
19400@@ -179,14 +178,7 @@ static inline unsigned long current_stack_pointer(void)
19401
19402 /* how to get the thread information struct from ASM */
19403 #define GET_THREAD_INFO(reg) \
19404- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19405- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19406-
19407-/*
19408- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19409- * a certain register (to be used in assembler memory operands).
19410- */
19411-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19412+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19413
19414 #endif
19415
19416@@ -242,5 +234,12 @@ static inline bool is_ia32_task(void)
19417 extern void arch_task_cache_init(void);
19418 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19419 extern void arch_release_task_struct(struct task_struct *tsk);
19420+
19421+#define __HAVE_THREAD_FUNCTIONS
19422+#define task_thread_info(task) (&(task)->tinfo)
19423+#define task_stack_page(task) ((task)->stack)
19424+#define setup_thread_stack(p, org) do {} while (0)
19425+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19426+
19427 #endif
19428 #endif /* _ASM_X86_THREAD_INFO_H */
19429diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19430index cd79194..e7a9491 100644
19431--- a/arch/x86/include/asm/tlbflush.h
19432+++ b/arch/x86/include/asm/tlbflush.h
19433@@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
19434
19435 static inline void __native_flush_tlb(void)
19436 {
19437+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19438+ u64 descriptor[2];
19439+
19440+ descriptor[0] = PCID_KERNEL;
19441+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19442+ return;
19443+ }
19444+
19445+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19446+ if (static_cpu_has(X86_FEATURE_PCID)) {
19447+ unsigned int cpu = raw_get_cpu();
19448+
19449+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19450+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19451+ raw_put_cpu_no_resched();
19452+ return;
19453+ }
19454+#endif
19455+
19456 native_write_cr3(native_read_cr3());
19457 }
19458
19459 static inline void __native_flush_tlb_global_irq_disabled(void)
19460 {
19461- unsigned long cr4;
19462+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19463+ u64 descriptor[2];
19464
19465- cr4 = this_cpu_read(cpu_tlbstate.cr4);
19466- /* clear PGE */
19467- native_write_cr4(cr4 & ~X86_CR4_PGE);
19468- /* write old PGE again and flush TLBs */
19469- native_write_cr4(cr4);
19470+ descriptor[0] = PCID_KERNEL;
19471+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19472+ } else {
19473+ unsigned long cr4;
19474+
19475+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
19476+ /* clear PGE */
19477+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19478+ /* write old PGE again and flush TLBs */
19479+ native_write_cr4(cr4);
19480+ }
19481 }
19482
19483 static inline void __native_flush_tlb_global(void)
19484@@ -118,6 +144,41 @@ static inline void __native_flush_tlb_global(void)
19485
19486 static inline void __native_flush_tlb_single(unsigned long addr)
19487 {
19488+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19489+ u64 descriptor[2];
19490+
19491+ descriptor[0] = PCID_KERNEL;
19492+ descriptor[1] = addr;
19493+
19494+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19495+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19496+ if (addr < TASK_SIZE_MAX)
19497+ descriptor[1] += pax_user_shadow_base;
19498+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19499+ }
19500+
19501+ descriptor[0] = PCID_USER;
19502+ descriptor[1] = addr;
19503+#endif
19504+
19505+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19506+ return;
19507+ }
19508+
19509+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19510+ if (static_cpu_has(X86_FEATURE_PCID)) {
19511+ unsigned int cpu = raw_get_cpu();
19512+
19513+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19514+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19515+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19516+ raw_put_cpu_no_resched();
19517+
19518+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19519+ addr += pax_user_shadow_base;
19520+ }
19521+#endif
19522+
19523 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19524 }
19525
19526diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19527index ace9dec..3f9e253 100644
19528--- a/arch/x86/include/asm/uaccess.h
19529+++ b/arch/x86/include/asm/uaccess.h
19530@@ -7,6 +7,7 @@
19531 #include <linux/compiler.h>
19532 #include <linux/thread_info.h>
19533 #include <linux/string.h>
19534+#include <linux/spinlock.h>
19535 #include <asm/asm.h>
19536 #include <asm/page.h>
19537 #include <asm/smap.h>
19538@@ -29,7 +30,12 @@
19539
19540 #define get_ds() (KERNEL_DS)
19541 #define get_fs() (current_thread_info()->addr_limit)
19542+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19543+void __set_fs(mm_segment_t x);
19544+void set_fs(mm_segment_t x);
19545+#else
19546 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19547+#endif
19548
19549 #define segment_eq(a, b) ((a).seg == (b).seg)
19550
19551@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19552 * checks that the pointer is in the user space range - after calling
19553 * this function, memory access functions may still return -EFAULT.
19554 */
19555-#define access_ok(type, addr, size) \
19556- likely(!__range_not_ok(addr, size, user_addr_max()))
19557+extern int _cond_resched(void);
19558+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19559+#define access_ok(type, addr, size) \
19560+({ \
19561+ unsigned long __size = size; \
19562+ unsigned long __addr = (unsigned long)addr; \
19563+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19564+ if (__ret_ao && __size) { \
19565+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19566+ unsigned long __end_ao = __addr + __size - 1; \
19567+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19568+ while (__addr_ao <= __end_ao) { \
19569+ char __c_ao; \
19570+ __addr_ao += PAGE_SIZE; \
19571+ if (__size > PAGE_SIZE) \
19572+ _cond_resched(); \
19573+ if (__get_user(__c_ao, (char __user *)__addr)) \
19574+ break; \
19575+ if (type != VERIFY_WRITE) { \
19576+ __addr = __addr_ao; \
19577+ continue; \
19578+ } \
19579+ if (__put_user(__c_ao, (char __user *)__addr)) \
19580+ break; \
19581+ __addr = __addr_ao; \
19582+ } \
19583+ } \
19584+ } \
19585+ __ret_ao; \
19586+})
19587
19588 /*
19589 * The exception table consists of pairs of addresses relative to the
19590@@ -134,11 +168,13 @@ extern int __get_user_8(void);
19591 extern int __get_user_bad(void);
19592
19593 /*
19594- * This is a type: either unsigned long, if the argument fits into
19595- * that type, or otherwise unsigned long long.
19596+ * This is a type: either (un)signed int, if the argument fits into
19597+ * that type, or otherwise (un)signed long long.
19598 */
19599 #define __inttype(x) \
19600-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19601+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19602+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19603+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19604
19605 /**
19606 * get_user: - Get a simple variable from user space.
19607@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19608 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19609 __chk_user_ptr(ptr); \
19610 might_fault(); \
19611+ pax_open_userland(); \
19612 asm volatile("call __get_user_%P3" \
19613 : "=a" (__ret_gu), "=r" (__val_gu) \
19614 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19615 (x) = (__force __typeof__(*(ptr))) __val_gu; \
19616+ pax_close_userland(); \
19617 __ret_gu; \
19618 })
19619
19620@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19621 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19622 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19623
19624-
19625+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19626+#define __copyuser_seg "gs;"
19627+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19628+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19629+#else
19630+#define __copyuser_seg
19631+#define __COPYUSER_SET_ES
19632+#define __COPYUSER_RESTORE_ES
19633+#endif
19634
19635 #ifdef CONFIG_X86_32
19636 #define __put_user_asm_u64(x, addr, err, errret) \
19637 asm volatile(ASM_STAC "\n" \
19638- "1: movl %%eax,0(%2)\n" \
19639- "2: movl %%edx,4(%2)\n" \
19640+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19641+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19642 "3: " ASM_CLAC "\n" \
19643 ".section .fixup,\"ax\"\n" \
19644 "4: movl %3,%0\n" \
19645@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19646
19647 #define __put_user_asm_ex_u64(x, addr) \
19648 asm volatile(ASM_STAC "\n" \
19649- "1: movl %%eax,0(%1)\n" \
19650- "2: movl %%edx,4(%1)\n" \
19651+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19652+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19653 "3: " ASM_CLAC "\n" \
19654 _ASM_EXTABLE_EX(1b, 2b) \
19655 _ASM_EXTABLE_EX(2b, 3b) \
19656@@ -257,7 +303,8 @@ extern void __put_user_8(void);
19657 __typeof__(*(ptr)) __pu_val; \
19658 __chk_user_ptr(ptr); \
19659 might_fault(); \
19660- __pu_val = x; \
19661+ __pu_val = (x); \
19662+ pax_open_userland(); \
19663 switch (sizeof(*(ptr))) { \
19664 case 1: \
19665 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19666@@ -275,6 +322,7 @@ extern void __put_user_8(void);
19667 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19668 break; \
19669 } \
19670+ pax_close_userland(); \
19671 __ret_pu; \
19672 })
19673
19674@@ -355,8 +403,10 @@ do { \
19675 } while (0)
19676
19677 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19678+do { \
19679+ pax_open_userland(); \
19680 asm volatile(ASM_STAC "\n" \
19681- "1: mov"itype" %2,%"rtype"1\n" \
19682+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19683 "2: " ASM_CLAC "\n" \
19684 ".section .fixup,\"ax\"\n" \
19685 "3: mov %3,%0\n" \
19686@@ -364,8 +414,10 @@ do { \
19687 " jmp 2b\n" \
19688 ".previous\n" \
19689 _ASM_EXTABLE(1b, 3b) \
19690- : "=r" (err), ltype(x) \
19691- : "m" (__m(addr)), "i" (errret), "0" (err))
19692+ : "=r" (err), ltype (x) \
19693+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19694+ pax_close_userland(); \
19695+} while (0)
19696
19697 #define __get_user_size_ex(x, ptr, size) \
19698 do { \
19699@@ -389,7 +441,7 @@ do { \
19700 } while (0)
19701
19702 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19703- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19704+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19705 "2:\n" \
19706 _ASM_EXTABLE_EX(1b, 2b) \
19707 : ltype(x) : "m" (__m(addr)))
19708@@ -406,13 +458,24 @@ do { \
19709 int __gu_err; \
19710 unsigned long __gu_val; \
19711 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19712- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19713+ (x) = (__typeof__(*(ptr)))__gu_val; \
19714 __gu_err; \
19715 })
19716
19717 /* FIXME: this hack is definitely wrong -AK */
19718 struct __large_struct { unsigned long buf[100]; };
19719-#define __m(x) (*(struct __large_struct __user *)(x))
19720+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19721+#define ____m(x) \
19722+({ \
19723+ unsigned long ____x = (unsigned long)(x); \
19724+ if (____x < pax_user_shadow_base) \
19725+ ____x += pax_user_shadow_base; \
19726+ (typeof(x))____x; \
19727+})
19728+#else
19729+#define ____m(x) (x)
19730+#endif
19731+#define __m(x) (*(struct __large_struct __user *)____m(x))
19732
19733 /*
19734 * Tell gcc we read from memory instead of writing: this is because
19735@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19736 * aliasing issues.
19737 */
19738 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19739+do { \
19740+ pax_open_userland(); \
19741 asm volatile(ASM_STAC "\n" \
19742- "1: mov"itype" %"rtype"1,%2\n" \
19743+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19744 "2: " ASM_CLAC "\n" \
19745 ".section .fixup,\"ax\"\n" \
19746 "3: mov %3,%0\n" \
19747@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19748 ".previous\n" \
19749 _ASM_EXTABLE(1b, 3b) \
19750 : "=r"(err) \
19751- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19752+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19753+ pax_close_userland(); \
19754+} while (0)
19755
19756 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19757- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19758+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19759 "2:\n" \
19760 _ASM_EXTABLE_EX(1b, 2b) \
19761 : : ltype(x), "m" (__m(addr)))
19762@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19763 */
19764 #define uaccess_try do { \
19765 current_thread_info()->uaccess_err = 0; \
19766+ pax_open_userland(); \
19767 stac(); \
19768 barrier();
19769
19770 #define uaccess_catch(err) \
19771 clac(); \
19772+ pax_close_userland(); \
19773 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19774 } while (0)
19775
19776@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19777 * On error, the variable @x is set to zero.
19778 */
19779
19780+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19781+#define __get_user(x, ptr) get_user((x), (ptr))
19782+#else
19783 #define __get_user(x, ptr) \
19784 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19785+#endif
19786
19787 /**
19788 * __put_user: - Write a simple value into user space, with less checking.
19789@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19790 * Returns zero on success, or -EFAULT on error.
19791 */
19792
19793+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19794+#define __put_user(x, ptr) put_user((x), (ptr))
19795+#else
19796 #define __put_user(x, ptr) \
19797 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19798+#endif
19799
19800 #define __get_user_unaligned __get_user
19801 #define __put_user_unaligned __put_user
19802@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19803 #define get_user_ex(x, ptr) do { \
19804 unsigned long __gue_val; \
19805 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19806- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19807+ (x) = (__typeof__(*(ptr)))__gue_val; \
19808 } while (0)
19809
19810 #define put_user_try uaccess_try
19811@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19812 extern __must_check long strnlen_user(const char __user *str, long n);
19813
19814 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19815-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19816+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19817
19818 extern void __cmpxchg_wrong_size(void)
19819 __compiletime_error("Bad argument size for cmpxchg");
19820@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19821 __typeof__(ptr) __uval = (uval); \
19822 __typeof__(*(ptr)) __old = (old); \
19823 __typeof__(*(ptr)) __new = (new); \
19824+ pax_open_userland(); \
19825 switch (size) { \
19826 case 1: \
19827 { \
19828 asm volatile("\t" ASM_STAC "\n" \
19829- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19830+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19831 "2:\t" ASM_CLAC "\n" \
19832 "\t.section .fixup, \"ax\"\n" \
19833 "3:\tmov %3, %0\n" \
19834 "\tjmp 2b\n" \
19835 "\t.previous\n" \
19836 _ASM_EXTABLE(1b, 3b) \
19837- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19838+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19839 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19840 : "memory" \
19841 ); \
19842@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19843 case 2: \
19844 { \
19845 asm volatile("\t" ASM_STAC "\n" \
19846- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19847+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19848 "2:\t" ASM_CLAC "\n" \
19849 "\t.section .fixup, \"ax\"\n" \
19850 "3:\tmov %3, %0\n" \
19851 "\tjmp 2b\n" \
19852 "\t.previous\n" \
19853 _ASM_EXTABLE(1b, 3b) \
19854- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19855+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19856 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19857 : "memory" \
19858 ); \
19859@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19860 case 4: \
19861 { \
19862 asm volatile("\t" ASM_STAC "\n" \
19863- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19864+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19865 "2:\t" ASM_CLAC "\n" \
19866 "\t.section .fixup, \"ax\"\n" \
19867 "3:\tmov %3, %0\n" \
19868 "\tjmp 2b\n" \
19869 "\t.previous\n" \
19870 _ASM_EXTABLE(1b, 3b) \
19871- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19872+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19873 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19874 : "memory" \
19875 ); \
19876@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19877 __cmpxchg_wrong_size(); \
19878 \
19879 asm volatile("\t" ASM_STAC "\n" \
19880- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19881+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19882 "2:\t" ASM_CLAC "\n" \
19883 "\t.section .fixup, \"ax\"\n" \
19884 "3:\tmov %3, %0\n" \
19885 "\tjmp 2b\n" \
19886 "\t.previous\n" \
19887 _ASM_EXTABLE(1b, 3b) \
19888- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19889+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19890 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19891 : "memory" \
19892 ); \
19893@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
19894 default: \
19895 __cmpxchg_wrong_size(); \
19896 } \
19897+ pax_close_userland(); \
19898 *__uval = __old; \
19899 __ret; \
19900 })
19901@@ -636,17 +715,6 @@ extern struct movsl_mask {
19902
19903 #define ARCH_HAS_NOCACHE_UACCESS 1
19904
19905-#ifdef CONFIG_X86_32
19906-# include <asm/uaccess_32.h>
19907-#else
19908-# include <asm/uaccess_64.h>
19909-#endif
19910-
19911-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19912- unsigned n);
19913-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19914- unsigned n);
19915-
19916 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19917 # define copy_user_diag __compiletime_error
19918 #else
19919@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19920 extern void copy_user_diag("copy_from_user() buffer size is too small")
19921 copy_from_user_overflow(void);
19922 extern void copy_user_diag("copy_to_user() buffer size is too small")
19923-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19924+copy_to_user_overflow(void);
19925
19926 #undef copy_user_diag
19927
19928@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19929
19930 extern void
19931 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19932-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19933+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19934 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19935
19936 #else
19937@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19938
19939 #endif
19940
19941+#ifdef CONFIG_X86_32
19942+# include <asm/uaccess_32.h>
19943+#else
19944+# include <asm/uaccess_64.h>
19945+#endif
19946+
19947 static inline unsigned long __must_check
19948 copy_from_user(void *to, const void __user *from, unsigned long n)
19949 {
19950- int sz = __compiletime_object_size(to);
19951+ size_t sz = __compiletime_object_size(to);
19952
19953 might_fault();
19954
19955@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19956 * case, and do only runtime checking for non-constant sizes.
19957 */
19958
19959- if (likely(sz < 0 || sz >= n))
19960- n = _copy_from_user(to, from, n);
19961- else if(__builtin_constant_p(n))
19962- copy_from_user_overflow();
19963- else
19964- __copy_from_user_overflow(sz, n);
19965+ if (likely(sz != (size_t)-1 && sz < n)) {
19966+ if(__builtin_constant_p(n))
19967+ copy_from_user_overflow();
19968+ else
19969+ __copy_from_user_overflow(sz, n);
19970+ } else if (access_ok(VERIFY_READ, from, n))
19971+ n = __copy_from_user(to, from, n);
19972+ else if ((long)n > 0)
19973+ memset(to, 0, n);
19974
19975 return n;
19976 }
19977@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19978 static inline unsigned long __must_check
19979 copy_to_user(void __user *to, const void *from, unsigned long n)
19980 {
19981- int sz = __compiletime_object_size(from);
19982+ size_t sz = __compiletime_object_size(from);
19983
19984 might_fault();
19985
19986 /* See the comment in copy_from_user() above. */
19987- if (likely(sz < 0 || sz >= n))
19988- n = _copy_to_user(to, from, n);
19989- else if(__builtin_constant_p(n))
19990- copy_to_user_overflow();
19991- else
19992- __copy_to_user_overflow(sz, n);
19993+ if (likely(sz != (size_t)-1 && sz < n)) {
19994+ if(__builtin_constant_p(n))
19995+ copy_to_user_overflow();
19996+ else
19997+ __copy_to_user_overflow(sz, n);
19998+ } else if (access_ok(VERIFY_WRITE, to, n))
19999+ n = __copy_to_user(to, from, n);
20000
20001 return n;
20002 }
20003diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20004index 3c03a5d..edb68ae 100644
20005--- a/arch/x86/include/asm/uaccess_32.h
20006+++ b/arch/x86/include/asm/uaccess_32.h
20007@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20008 * anything, so this is accurate.
20009 */
20010
20011-static __always_inline unsigned long __must_check
20012+static __always_inline __size_overflow(3) unsigned long __must_check
20013 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20014 {
20015+ if ((long)n < 0)
20016+ return n;
20017+
20018+ check_object_size(from, n, true);
20019+
20020 if (__builtin_constant_p(n)) {
20021 unsigned long ret;
20022
20023@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20024 __copy_to_user(void __user *to, const void *from, unsigned long n)
20025 {
20026 might_fault();
20027+
20028 return __copy_to_user_inatomic(to, from, n);
20029 }
20030
20031-static __always_inline unsigned long
20032+static __always_inline __size_overflow(3) unsigned long
20033 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20034 {
20035+ if ((long)n < 0)
20036+ return n;
20037+
20038 /* Avoid zeroing the tail if the copy fails..
20039 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20040 * but as the zeroing behaviour is only significant when n is not
20041@@ -137,6 +146,12 @@ static __always_inline unsigned long
20042 __copy_from_user(void *to, const void __user *from, unsigned long n)
20043 {
20044 might_fault();
20045+
20046+ if ((long)n < 0)
20047+ return n;
20048+
20049+ check_object_size(to, n, false);
20050+
20051 if (__builtin_constant_p(n)) {
20052 unsigned long ret;
20053
20054@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20055 const void __user *from, unsigned long n)
20056 {
20057 might_fault();
20058+
20059+ if ((long)n < 0)
20060+ return n;
20061+
20062 if (__builtin_constant_p(n)) {
20063 unsigned long ret;
20064
20065@@ -181,7 +200,10 @@ static __always_inline unsigned long
20066 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20067 unsigned long n)
20068 {
20069- return __copy_from_user_ll_nocache_nozero(to, from, n);
20070+ if ((long)n < 0)
20071+ return n;
20072+
20073+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20074 }
20075
20076 #endif /* _ASM_X86_UACCESS_32_H */
20077diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20078index f2f9b39..2ae1bf8 100644
20079--- a/arch/x86/include/asm/uaccess_64.h
20080+++ b/arch/x86/include/asm/uaccess_64.h
20081@@ -10,6 +10,9 @@
20082 #include <asm/alternative.h>
20083 #include <asm/cpufeature.h>
20084 #include <asm/page.h>
20085+#include <asm/pgtable.h>
20086+
20087+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20088
20089 /*
20090 * Copy To/From Userspace
20091@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20092 __must_check unsigned long
20093 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20094
20095-static __always_inline __must_check unsigned long
20096-copy_user_generic(void *to, const void *from, unsigned len)
20097+static __always_inline __must_check __size_overflow(3) unsigned long
20098+copy_user_generic(void *to, const void *from, unsigned long len)
20099 {
20100 unsigned ret;
20101
20102@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20103 }
20104
20105 __must_check unsigned long
20106-copy_in_user(void __user *to, const void __user *from, unsigned len);
20107+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20108
20109 static __always_inline __must_check
20110-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20111+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20112 {
20113- int ret = 0;
20114+ size_t sz = __compiletime_object_size(dst);
20115+ unsigned ret = 0;
20116+
20117+ if (size > INT_MAX)
20118+ return size;
20119+
20120+ check_object_size(dst, size, false);
20121+
20122+#ifdef CONFIG_PAX_MEMORY_UDEREF
20123+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20124+ return size;
20125+#endif
20126+
20127+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20128+ if(__builtin_constant_p(size))
20129+ copy_from_user_overflow();
20130+ else
20131+ __copy_from_user_overflow(sz, size);
20132+ return size;
20133+ }
20134
20135 if (!__builtin_constant_p(size))
20136- return copy_user_generic(dst, (__force void *)src, size);
20137+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20138 switch (size) {
20139- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20140+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20141 ret, "b", "b", "=q", 1);
20142 return ret;
20143- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20144+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20145 ret, "w", "w", "=r", 2);
20146 return ret;
20147- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20148+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20149 ret, "l", "k", "=r", 4);
20150 return ret;
20151- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20152+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20153 ret, "q", "", "=r", 8);
20154 return ret;
20155 case 10:
20156- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20157+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20158 ret, "q", "", "=r", 10);
20159 if (unlikely(ret))
20160 return ret;
20161 __get_user_asm(*(u16 *)(8 + (char *)dst),
20162- (u16 __user *)(8 + (char __user *)src),
20163+ (const u16 __user *)(8 + (const char __user *)src),
20164 ret, "w", "w", "=r", 2);
20165 return ret;
20166 case 16:
20167- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20168+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20169 ret, "q", "", "=r", 16);
20170 if (unlikely(ret))
20171 return ret;
20172 __get_user_asm(*(u64 *)(8 + (char *)dst),
20173- (u64 __user *)(8 + (char __user *)src),
20174+ (const u64 __user *)(8 + (const char __user *)src),
20175 ret, "q", "", "=r", 8);
20176 return ret;
20177 default:
20178- return copy_user_generic(dst, (__force void *)src, size);
20179+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20180 }
20181 }
20182
20183 static __always_inline __must_check
20184-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20185+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20186 {
20187 might_fault();
20188 return __copy_from_user_nocheck(dst, src, size);
20189 }
20190
20191 static __always_inline __must_check
20192-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20193+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20194 {
20195- int ret = 0;
20196+ size_t sz = __compiletime_object_size(src);
20197+ unsigned ret = 0;
20198+
20199+ if (size > INT_MAX)
20200+ return size;
20201+
20202+ check_object_size(src, size, true);
20203+
20204+#ifdef CONFIG_PAX_MEMORY_UDEREF
20205+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20206+ return size;
20207+#endif
20208+
20209+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20210+ if(__builtin_constant_p(size))
20211+ copy_to_user_overflow();
20212+ else
20213+ __copy_to_user_overflow(sz, size);
20214+ return size;
20215+ }
20216
20217 if (!__builtin_constant_p(size))
20218- return copy_user_generic((__force void *)dst, src, size);
20219+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20220 switch (size) {
20221- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20222+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20223 ret, "b", "b", "iq", 1);
20224 return ret;
20225- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20226+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20227 ret, "w", "w", "ir", 2);
20228 return ret;
20229- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20230+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20231 ret, "l", "k", "ir", 4);
20232 return ret;
20233- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20234+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20235 ret, "q", "", "er", 8);
20236 return ret;
20237 case 10:
20238- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20239+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20240 ret, "q", "", "er", 10);
20241 if (unlikely(ret))
20242 return ret;
20243 asm("":::"memory");
20244- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20245+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20246 ret, "w", "w", "ir", 2);
20247 return ret;
20248 case 16:
20249- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20250+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20251 ret, "q", "", "er", 16);
20252 if (unlikely(ret))
20253 return ret;
20254 asm("":::"memory");
20255- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20256+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20257 ret, "q", "", "er", 8);
20258 return ret;
20259 default:
20260- return copy_user_generic((__force void *)dst, src, size);
20261+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20262 }
20263 }
20264
20265 static __always_inline __must_check
20266-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20267+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20268 {
20269 might_fault();
20270 return __copy_to_user_nocheck(dst, src, size);
20271 }
20272
20273 static __always_inline __must_check
20274-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20275+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20276 {
20277- int ret = 0;
20278+ unsigned ret = 0;
20279
20280 might_fault();
20281+
20282+ if (size > INT_MAX)
20283+ return size;
20284+
20285+#ifdef CONFIG_PAX_MEMORY_UDEREF
20286+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20287+ return size;
20288+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20289+ return size;
20290+#endif
20291+
20292 if (!__builtin_constant_p(size))
20293- return copy_user_generic((__force void *)dst,
20294- (__force void *)src, size);
20295+ return copy_user_generic((__force_kernel void *)____m(dst),
20296+ (__force_kernel const void *)____m(src), size);
20297 switch (size) {
20298 case 1: {
20299 u8 tmp;
20300- __get_user_asm(tmp, (u8 __user *)src,
20301+ __get_user_asm(tmp, (const u8 __user *)src,
20302 ret, "b", "b", "=q", 1);
20303 if (likely(!ret))
20304 __put_user_asm(tmp, (u8 __user *)dst,
20305@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20306 }
20307 case 2: {
20308 u16 tmp;
20309- __get_user_asm(tmp, (u16 __user *)src,
20310+ __get_user_asm(tmp, (const u16 __user *)src,
20311 ret, "w", "w", "=r", 2);
20312 if (likely(!ret))
20313 __put_user_asm(tmp, (u16 __user *)dst,
20314@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20315
20316 case 4: {
20317 u32 tmp;
20318- __get_user_asm(tmp, (u32 __user *)src,
20319+ __get_user_asm(tmp, (const u32 __user *)src,
20320 ret, "l", "k", "=r", 4);
20321 if (likely(!ret))
20322 __put_user_asm(tmp, (u32 __user *)dst,
20323@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20324 }
20325 case 8: {
20326 u64 tmp;
20327- __get_user_asm(tmp, (u64 __user *)src,
20328+ __get_user_asm(tmp, (const u64 __user *)src,
20329 ret, "q", "", "=r", 8);
20330 if (likely(!ret))
20331 __put_user_asm(tmp, (u64 __user *)dst,
20332@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20333 return ret;
20334 }
20335 default:
20336- return copy_user_generic((__force void *)dst,
20337- (__force void *)src, size);
20338+ return copy_user_generic((__force_kernel void *)____m(dst),
20339+ (__force_kernel const void *)____m(src), size);
20340 }
20341 }
20342
20343-static __must_check __always_inline int
20344-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20345+static __must_check __always_inline unsigned long
20346+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20347 {
20348 return __copy_from_user_nocheck(dst, src, size);
20349 }
20350
20351-static __must_check __always_inline int
20352-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20353+static __must_check __always_inline unsigned long
20354+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20355 {
20356 return __copy_to_user_nocheck(dst, src, size);
20357 }
20358
20359-extern long __copy_user_nocache(void *dst, const void __user *src,
20360- unsigned size, int zerorest);
20361+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20362+ unsigned long size, int zerorest);
20363
20364-static inline int
20365-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20366+static inline unsigned long
20367+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20368 {
20369 might_fault();
20370+
20371+ if (size > INT_MAX)
20372+ return size;
20373+
20374+#ifdef CONFIG_PAX_MEMORY_UDEREF
20375+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20376+ return size;
20377+#endif
20378+
20379 return __copy_user_nocache(dst, src, size, 1);
20380 }
20381
20382-static inline int
20383+static inline unsigned long
20384 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20385- unsigned size)
20386+ unsigned long size)
20387 {
20388+ if (size > INT_MAX)
20389+ return size;
20390+
20391+#ifdef CONFIG_PAX_MEMORY_UDEREF
20392+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20393+ return size;
20394+#endif
20395+
20396 return __copy_user_nocache(dst, src, size, 0);
20397 }
20398
20399 unsigned long
20400-copy_user_handle_tail(char *to, char *from, unsigned len);
20401+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len) __size_overflow(3);
20402
20403 #endif /* _ASM_X86_UACCESS_64_H */
20404diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20405index 5b238981..77fdd78 100644
20406--- a/arch/x86/include/asm/word-at-a-time.h
20407+++ b/arch/x86/include/asm/word-at-a-time.h
20408@@ -11,7 +11,7 @@
20409 * and shift, for example.
20410 */
20411 struct word_at_a_time {
20412- const unsigned long one_bits, high_bits;
20413+ unsigned long one_bits, high_bits;
20414 };
20415
20416 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20417diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20418index f58a9c7..dc378042a 100644
20419--- a/arch/x86/include/asm/x86_init.h
20420+++ b/arch/x86/include/asm/x86_init.h
20421@@ -129,7 +129,7 @@ struct x86_init_ops {
20422 struct x86_init_timers timers;
20423 struct x86_init_iommu iommu;
20424 struct x86_init_pci pci;
20425-};
20426+} __no_const;
20427
20428 /**
20429 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20430@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20431 void (*setup_percpu_clockev)(void);
20432 void (*early_percpu_clock_init)(void);
20433 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20434-};
20435+} __no_const;
20436
20437 struct timespec;
20438
20439@@ -168,7 +168,7 @@ struct x86_platform_ops {
20440 void (*save_sched_clock_state)(void);
20441 void (*restore_sched_clock_state)(void);
20442 void (*apic_post_init)(void);
20443-};
20444+} __no_const;
20445
20446 struct pci_dev;
20447 struct msi_msg;
20448@@ -182,7 +182,7 @@ struct x86_msi_ops {
20449 void (*teardown_msi_irqs)(struct pci_dev *dev);
20450 void (*restore_msi_irqs)(struct pci_dev *dev);
20451 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20452-};
20453+} __no_const;
20454
20455 struct IO_APIC_route_entry;
20456 struct io_apic_irq_attr;
20457@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20458 unsigned int destination, int vector,
20459 struct io_apic_irq_attr *attr);
20460 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20461-};
20462+} __no_const;
20463
20464 extern struct x86_init_ops x86_init;
20465 extern struct x86_cpuinit_ops x86_cpuinit;
20466diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20467index 358dcd3..23c0bf1 100644
20468--- a/arch/x86/include/asm/xen/page.h
20469+++ b/arch/x86/include/asm/xen/page.h
20470@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20471 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20472 * cases needing an extended handling.
20473 */
20474-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20475+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20476 {
20477 unsigned long mfn;
20478
20479diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20480index c9a6d68..cb57f42 100644
20481--- a/arch/x86/include/asm/xsave.h
20482+++ b/arch/x86/include/asm/xsave.h
20483@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20484 if (unlikely(err))
20485 return -EFAULT;
20486
20487+ pax_open_userland();
20488 __asm__ __volatile__(ASM_STAC "\n"
20489- "1:"XSAVE"\n"
20490+ "1:"
20491+ __copyuser_seg
20492+ XSAVE"\n"
20493 "2: " ASM_CLAC "\n"
20494 xstate_fault
20495 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20496 : "memory");
20497+ pax_close_userland();
20498 return err;
20499 }
20500
20501@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20502 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20503 {
20504 int err = 0;
20505- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20506+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20507 u32 lmask = mask;
20508 u32 hmask = mask >> 32;
20509
20510+ pax_open_userland();
20511 __asm__ __volatile__(ASM_STAC "\n"
20512- "1:"XRSTOR"\n"
20513+ "1:"
20514+ __copyuser_seg
20515+ XRSTOR"\n"
20516 "2: " ASM_CLAC "\n"
20517 xstate_fault
20518 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20519 : "memory"); /* memory required? */
20520+ pax_close_userland();
20521 return err;
20522 }
20523
20524diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20525index d993e33..8db1b18 100644
20526--- a/arch/x86/include/uapi/asm/e820.h
20527+++ b/arch/x86/include/uapi/asm/e820.h
20528@@ -58,7 +58,7 @@ struct e820map {
20529 #define ISA_START_ADDRESS 0xa0000
20530 #define ISA_END_ADDRESS 0x100000
20531
20532-#define BIOS_BEGIN 0x000a0000
20533+#define BIOS_BEGIN 0x000c0000
20534 #define BIOS_END 0x00100000
20535
20536 #define BIOS_ROM_BASE 0xffe00000
20537diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20538index 7b0a55a..ad115bf 100644
20539--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20540+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20541@@ -49,7 +49,6 @@
20542 #define EFLAGS 144
20543 #define RSP 152
20544 #define SS 160
20545-#define ARGOFFSET R11
20546 #endif /* __ASSEMBLY__ */
20547
20548 /* top of stack page */
20549diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20550index cdb1b70..426434c 100644
20551--- a/arch/x86/kernel/Makefile
20552+++ b/arch/x86/kernel/Makefile
20553@@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20554 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20555 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20556 obj-y += probe_roms.o
20557-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20558+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20559 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20560 obj-$(CONFIG_X86_64) += mcount_64.o
20561 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20562diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20563index 803b684..68c64f1 100644
20564--- a/arch/x86/kernel/acpi/boot.c
20565+++ b/arch/x86/kernel/acpi/boot.c
20566@@ -1361,7 +1361,7 @@ static void __init acpi_reduced_hw_init(void)
20567 * If your system is blacklisted here, but you find that acpi=force
20568 * works for you, please contact linux-acpi@vger.kernel.org
20569 */
20570-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20571+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20572 /*
20573 * Boxes that need ACPI disabled
20574 */
20575@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20576 };
20577
20578 /* second table for DMI checks that should run after early-quirks */
20579-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20580+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20581 /*
20582 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20583 * which includes some code which overrides all temperature
20584diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20585index d1daead..acd77e2 100644
20586--- a/arch/x86/kernel/acpi/sleep.c
20587+++ b/arch/x86/kernel/acpi/sleep.c
20588@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20589 #else /* CONFIG_64BIT */
20590 #ifdef CONFIG_SMP
20591 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20592+
20593+ pax_open_kernel();
20594 early_gdt_descr.address =
20595 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20596+ pax_close_kernel();
20597+
20598 initial_gs = per_cpu_offset(smp_processor_id());
20599 #endif
20600 initial_code = (unsigned long)wakeup_long64;
20601diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20602index 665c6b7..eae4d56 100644
20603--- a/arch/x86/kernel/acpi/wakeup_32.S
20604+++ b/arch/x86/kernel/acpi/wakeup_32.S
20605@@ -29,13 +29,11 @@ wakeup_pmode_return:
20606 # and restore the stack ... but you need gdt for this to work
20607 movl saved_context_esp, %esp
20608
20609- movl %cs:saved_magic, %eax
20610- cmpl $0x12345678, %eax
20611+ cmpl $0x12345678, saved_magic
20612 jne bogus_magic
20613
20614 # jump to place where we left off
20615- movl saved_eip, %eax
20616- jmp *%eax
20617+ jmp *(saved_eip)
20618
20619 bogus_magic:
20620 jmp bogus_magic
20621diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20622index 703130f..27a155d 100644
20623--- a/arch/x86/kernel/alternative.c
20624+++ b/arch/x86/kernel/alternative.c
20625@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20626 */
20627 for (a = start; a < end; a++) {
20628 instr = (u8 *)&a->instr_offset + a->instr_offset;
20629+
20630+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20631+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20632+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20633+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20634+#endif
20635+
20636 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20637 BUG_ON(a->replacementlen > a->instrlen);
20638 BUG_ON(a->instrlen > sizeof(insnbuf));
20639@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20640 add_nops(insnbuf + a->replacementlen,
20641 a->instrlen - a->replacementlen);
20642
20643+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20644+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20645+ instr = ktva_ktla(instr);
20646+#endif
20647+
20648 text_poke_early(instr, insnbuf, a->instrlen);
20649 }
20650 }
20651@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20652 for (poff = start; poff < end; poff++) {
20653 u8 *ptr = (u8 *)poff + *poff;
20654
20655+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20656+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20657+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20658+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20659+#endif
20660+
20661 if (!*poff || ptr < text || ptr >= text_end)
20662 continue;
20663 /* turn DS segment override prefix into lock prefix */
20664- if (*ptr == 0x3e)
20665+ if (*ktla_ktva(ptr) == 0x3e)
20666 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20667 }
20668 mutex_unlock(&text_mutex);
20669@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20670 for (poff = start; poff < end; poff++) {
20671 u8 *ptr = (u8 *)poff + *poff;
20672
20673+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20674+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20675+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20676+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20677+#endif
20678+
20679 if (!*poff || ptr < text || ptr >= text_end)
20680 continue;
20681 /* turn lock prefix into DS segment override prefix */
20682- if (*ptr == 0xf0)
20683+ if (*ktla_ktva(ptr) == 0xf0)
20684 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20685 }
20686 mutex_unlock(&text_mutex);
20687@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20688
20689 BUG_ON(p->len > MAX_PATCH_LEN);
20690 /* prep the buffer with the original instructions */
20691- memcpy(insnbuf, p->instr, p->len);
20692+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20693 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20694 (unsigned long)p->instr, p->len);
20695
20696@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20697 if (!uniproc_patched || num_possible_cpus() == 1)
20698 free_init_pages("SMP alternatives",
20699 (unsigned long)__smp_locks,
20700- (unsigned long)__smp_locks_end);
20701+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20702 #endif
20703
20704 apply_paravirt(__parainstructions, __parainstructions_end);
20705@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20706 * instructions. And on the local CPU you need to be protected again NMI or MCE
20707 * handlers seeing an inconsistent instruction while you patch.
20708 */
20709-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20710+void *__kprobes text_poke_early(void *addr, const void *opcode,
20711 size_t len)
20712 {
20713 unsigned long flags;
20714 local_irq_save(flags);
20715- memcpy(addr, opcode, len);
20716+
20717+ pax_open_kernel();
20718+ memcpy(ktla_ktva(addr), opcode, len);
20719 sync_core();
20720+ pax_close_kernel();
20721+
20722 local_irq_restore(flags);
20723 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20724 that causes hangs on some VIA CPUs. */
20725@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20726 */
20727 void *text_poke(void *addr, const void *opcode, size_t len)
20728 {
20729- unsigned long flags;
20730- char *vaddr;
20731+ unsigned char *vaddr = ktla_ktva(addr);
20732 struct page *pages[2];
20733- int i;
20734+ size_t i;
20735
20736 if (!core_kernel_text((unsigned long)addr)) {
20737- pages[0] = vmalloc_to_page(addr);
20738- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20739+ pages[0] = vmalloc_to_page(vaddr);
20740+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20741 } else {
20742- pages[0] = virt_to_page(addr);
20743+ pages[0] = virt_to_page(vaddr);
20744 WARN_ON(!PageReserved(pages[0]));
20745- pages[1] = virt_to_page(addr + PAGE_SIZE);
20746+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20747 }
20748 BUG_ON(!pages[0]);
20749- local_irq_save(flags);
20750- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20751- if (pages[1])
20752- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20753- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20754- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20755- clear_fixmap(FIX_TEXT_POKE0);
20756- if (pages[1])
20757- clear_fixmap(FIX_TEXT_POKE1);
20758- local_flush_tlb();
20759- sync_core();
20760- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20761- that causes hangs on some VIA CPUs. */
20762+ text_poke_early(addr, opcode, len);
20763 for (i = 0; i < len; i++)
20764- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20765- local_irq_restore(flags);
20766+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20767 return addr;
20768 }
20769
20770@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20771 if (likely(!bp_patching_in_progress))
20772 return 0;
20773
20774- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20775+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20776 return 0;
20777
20778 /* set up the specified breakpoint handler */
20779@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20780 */
20781 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20782 {
20783- unsigned char int3 = 0xcc;
20784+ const unsigned char int3 = 0xcc;
20785
20786 bp_int3_handler = handler;
20787 bp_int3_addr = (u8 *)addr + sizeof(int3);
20788diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20789index ad3639a..bd4253c 100644
20790--- a/arch/x86/kernel/apic/apic.c
20791+++ b/arch/x86/kernel/apic/apic.c
20792@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20793 /*
20794 * Debug level, exported for io_apic.c
20795 */
20796-unsigned int apic_verbosity;
20797+int apic_verbosity;
20798
20799 int pic_mode;
20800
20801@@ -1918,7 +1918,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20802 apic_write(APIC_ESR, 0);
20803 v = apic_read(APIC_ESR);
20804 ack_APIC_irq();
20805- atomic_inc(&irq_err_count);
20806+ atomic_inc_unchecked(&irq_err_count);
20807
20808 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20809 smp_processor_id(), v);
20810diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20811index de918c4..32eed23 100644
20812--- a/arch/x86/kernel/apic/apic_flat_64.c
20813+++ b/arch/x86/kernel/apic/apic_flat_64.c
20814@@ -154,7 +154,7 @@ static int flat_probe(void)
20815 return 1;
20816 }
20817
20818-static struct apic apic_flat = {
20819+static struct apic apic_flat __read_only = {
20820 .name = "flat",
20821 .probe = flat_probe,
20822 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20823@@ -260,7 +260,7 @@ static int physflat_probe(void)
20824 return 0;
20825 }
20826
20827-static struct apic apic_physflat = {
20828+static struct apic apic_physflat __read_only = {
20829
20830 .name = "physical flat",
20831 .probe = physflat_probe,
20832diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20833index b205cdb..d8503ff 100644
20834--- a/arch/x86/kernel/apic/apic_noop.c
20835+++ b/arch/x86/kernel/apic/apic_noop.c
20836@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20837 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20838 }
20839
20840-struct apic apic_noop = {
20841+struct apic apic_noop __read_only = {
20842 .name = "noop",
20843 .probe = noop_probe,
20844 .acpi_madt_oem_check = NULL,
20845diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20846index c4a8d63..fe893ac 100644
20847--- a/arch/x86/kernel/apic/bigsmp_32.c
20848+++ b/arch/x86/kernel/apic/bigsmp_32.c
20849@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20850 return dmi_bigsmp;
20851 }
20852
20853-static struct apic apic_bigsmp = {
20854+static struct apic apic_bigsmp __read_only = {
20855
20856 .name = "bigsmp",
20857 .probe = probe_bigsmp,
20858diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20859index f4dc246..fbab133 100644
20860--- a/arch/x86/kernel/apic/io_apic.c
20861+++ b/arch/x86/kernel/apic/io_apic.c
20862@@ -1862,7 +1862,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20863 return ret;
20864 }
20865
20866-atomic_t irq_mis_count;
20867+atomic_unchecked_t irq_mis_count;
20868
20869 #ifdef CONFIG_GENERIC_PENDING_IRQ
20870 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20871@@ -2003,7 +2003,7 @@ static void ack_ioapic_level(struct irq_data *data)
20872 * at the cpu.
20873 */
20874 if (!(v & (1 << (i & 0x1f)))) {
20875- atomic_inc(&irq_mis_count);
20876+ atomic_inc_unchecked(&irq_mis_count);
20877
20878 eoi_ioapic_irq(irq, cfg);
20879 }
20880@@ -2011,7 +2011,7 @@ static void ack_ioapic_level(struct irq_data *data)
20881 ioapic_irqd_unmask(data, cfg, masked);
20882 }
20883
20884-static struct irq_chip ioapic_chip __read_mostly = {
20885+static struct irq_chip ioapic_chip = {
20886 .name = "IO-APIC",
20887 .irq_startup = startup_ioapic_irq,
20888 .irq_mask = mask_ioapic_irq,
20889@@ -2070,7 +2070,7 @@ static void ack_lapic_irq(struct irq_data *data)
20890 ack_APIC_irq();
20891 }
20892
20893-static struct irq_chip lapic_chip __read_mostly = {
20894+static struct irq_chip lapic_chip = {
20895 .name = "local-APIC",
20896 .irq_mask = mask_lapic_irq,
20897 .irq_unmask = unmask_lapic_irq,
20898diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20899index bda4886..f9c7195 100644
20900--- a/arch/x86/kernel/apic/probe_32.c
20901+++ b/arch/x86/kernel/apic/probe_32.c
20902@@ -72,7 +72,7 @@ static int probe_default(void)
20903 return 1;
20904 }
20905
20906-static struct apic apic_default = {
20907+static struct apic apic_default __read_only = {
20908
20909 .name = "default",
20910 .probe = probe_default,
20911diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20912index 6cedd79..023ff8e 100644
20913--- a/arch/x86/kernel/apic/vector.c
20914+++ b/arch/x86/kernel/apic/vector.c
20915@@ -21,7 +21,7 @@
20916
20917 static DEFINE_RAW_SPINLOCK(vector_lock);
20918
20919-void lock_vector_lock(void)
20920+void lock_vector_lock(void) __acquires(vector_lock)
20921 {
20922 /* Used to the online set of cpus does not change
20923 * during assign_irq_vector.
20924@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20925 raw_spin_lock(&vector_lock);
20926 }
20927
20928-void unlock_vector_lock(void)
20929+void unlock_vector_lock(void) __releases(vector_lock)
20930 {
20931 raw_spin_unlock(&vector_lock);
20932 }
20933diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20934index e658f21..b695a1a 100644
20935--- a/arch/x86/kernel/apic/x2apic_cluster.c
20936+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20937@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20938 return notifier_from_errno(err);
20939 }
20940
20941-static struct notifier_block __refdata x2apic_cpu_notifier = {
20942+static struct notifier_block x2apic_cpu_notifier = {
20943 .notifier_call = update_clusterinfo,
20944 };
20945
20946@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20947 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20948 }
20949
20950-static struct apic apic_x2apic_cluster = {
20951+static struct apic apic_x2apic_cluster __read_only = {
20952
20953 .name = "cluster x2apic",
20954 .probe = x2apic_cluster_probe,
20955diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20956index 6fae733..5ca17af 100644
20957--- a/arch/x86/kernel/apic/x2apic_phys.c
20958+++ b/arch/x86/kernel/apic/x2apic_phys.c
20959@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20960 return apic == &apic_x2apic_phys;
20961 }
20962
20963-static struct apic apic_x2apic_phys = {
20964+static struct apic apic_x2apic_phys __read_only = {
20965
20966 .name = "physical x2apic",
20967 .probe = x2apic_phys_probe,
20968diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20969index 8e9dcfd..c61b3e4 100644
20970--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20971+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20972@@ -348,7 +348,7 @@ static int uv_probe(void)
20973 return apic == &apic_x2apic_uv_x;
20974 }
20975
20976-static struct apic __refdata apic_x2apic_uv_x = {
20977+static struct apic apic_x2apic_uv_x __read_only = {
20978
20979 .name = "UV large system",
20980 .probe = uv_probe,
20981diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20982index 927ec92..de68f32 100644
20983--- a/arch/x86/kernel/apm_32.c
20984+++ b/arch/x86/kernel/apm_32.c
20985@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20986 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20987 * even though they are called in protected mode.
20988 */
20989-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20990+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20991 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20992
20993 static const char driver_version[] = "1.16ac"; /* no spaces */
20994@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20995 BUG_ON(cpu != 0);
20996 gdt = get_cpu_gdt_table(cpu);
20997 save_desc_40 = gdt[0x40 / 8];
20998+
20999+ pax_open_kernel();
21000 gdt[0x40 / 8] = bad_bios_desc;
21001+ pax_close_kernel();
21002
21003 apm_irq_save(flags);
21004 APM_DO_SAVE_SEGS;
21005@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
21006 &call->esi);
21007 APM_DO_RESTORE_SEGS;
21008 apm_irq_restore(flags);
21009+
21010+ pax_open_kernel();
21011 gdt[0x40 / 8] = save_desc_40;
21012+ pax_close_kernel();
21013+
21014 put_cpu();
21015
21016 return call->eax & 0xff;
21017@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21018 BUG_ON(cpu != 0);
21019 gdt = get_cpu_gdt_table(cpu);
21020 save_desc_40 = gdt[0x40 / 8];
21021+
21022+ pax_open_kernel();
21023 gdt[0x40 / 8] = bad_bios_desc;
21024+ pax_close_kernel();
21025
21026 apm_irq_save(flags);
21027 APM_DO_SAVE_SEGS;
21028@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21029 &call->eax);
21030 APM_DO_RESTORE_SEGS;
21031 apm_irq_restore(flags);
21032+
21033+ pax_open_kernel();
21034 gdt[0x40 / 8] = save_desc_40;
21035+ pax_close_kernel();
21036+
21037 put_cpu();
21038 return error;
21039 }
21040@@ -2039,7 +2053,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
21041 return 0;
21042 }
21043
21044-static struct dmi_system_id __initdata apm_dmi_table[] = {
21045+static const struct dmi_system_id __initconst apm_dmi_table[] = {
21046 {
21047 print_if_true,
21048 KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
21049@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21050 * code to that CPU.
21051 */
21052 gdt = get_cpu_gdt_table(0);
21053+
21054+ pax_open_kernel();
21055 set_desc_base(&gdt[APM_CS >> 3],
21056 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21057 set_desc_base(&gdt[APM_CS_16 >> 3],
21058 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21059 set_desc_base(&gdt[APM_DS >> 3],
21060 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21061+ pax_close_kernel();
21062
21063 proc_create("apm", 0, NULL, &apm_file_ops);
21064
21065diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21066index 9f6b934..cf5ffb3 100644
21067--- a/arch/x86/kernel/asm-offsets.c
21068+++ b/arch/x86/kernel/asm-offsets.c
21069@@ -32,6 +32,8 @@ void common(void) {
21070 OFFSET(TI_flags, thread_info, flags);
21071 OFFSET(TI_status, thread_info, status);
21072 OFFSET(TI_addr_limit, thread_info, addr_limit);
21073+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21074+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21075
21076 BLANK();
21077 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21078@@ -52,8 +54,26 @@ void common(void) {
21079 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21080 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21081 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21082+
21083+#ifdef CONFIG_PAX_KERNEXEC
21084+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21085 #endif
21086
21087+#ifdef CONFIG_PAX_MEMORY_UDEREF
21088+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21089+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21090+#ifdef CONFIG_X86_64
21091+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21092+#endif
21093+#endif
21094+
21095+#endif
21096+
21097+ BLANK();
21098+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21099+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21100+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21101+
21102 #ifdef CONFIG_XEN
21103 BLANK();
21104 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21105diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21106index fdcbb4d..036dd93 100644
21107--- a/arch/x86/kernel/asm-offsets_64.c
21108+++ b/arch/x86/kernel/asm-offsets_64.c
21109@@ -80,6 +80,7 @@ int main(void)
21110 BLANK();
21111 #undef ENTRY
21112
21113+ DEFINE(TSS_size, sizeof(struct tss_struct));
21114 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21115 BLANK();
21116
21117diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21118index 80091ae..0c5184f 100644
21119--- a/arch/x86/kernel/cpu/Makefile
21120+++ b/arch/x86/kernel/cpu/Makefile
21121@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21122 CFLAGS_REMOVE_perf_event.o = -pg
21123 endif
21124
21125-# Make sure load_percpu_segment has no stackprotector
21126-nostackp := $(call cc-option, -fno-stack-protector)
21127-CFLAGS_common.o := $(nostackp)
21128-
21129 obj-y := intel_cacheinfo.o scattered.o topology.o
21130 obj-y += common.o
21131 obj-y += rdrand.o
21132diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21133index a220239..607fc38 100644
21134--- a/arch/x86/kernel/cpu/amd.c
21135+++ b/arch/x86/kernel/cpu/amd.c
21136@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21137 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21138 {
21139 /* AMD errata T13 (order #21922) */
21140- if ((c->x86 == 6)) {
21141+ if (c->x86 == 6) {
21142 /* Duron Rev A0 */
21143 if (c->x86_model == 3 && c->x86_mask == 0)
21144 size = 64;
21145diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21146index 2346c95..c061472 100644
21147--- a/arch/x86/kernel/cpu/common.c
21148+++ b/arch/x86/kernel/cpu/common.c
21149@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
21150
21151 static const struct cpu_dev *this_cpu = &default_cpu;
21152
21153-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21154-#ifdef CONFIG_X86_64
21155- /*
21156- * We need valid kernel segments for data and code in long mode too
21157- * IRET will check the segment types kkeil 2000/10/28
21158- * Also sysret mandates a special GDT layout
21159- *
21160- * TLS descriptors are currently at a different place compared to i386.
21161- * Hopefully nobody expects them at a fixed place (Wine?)
21162- */
21163- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21164- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21165- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21166- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21167- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21168- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21169-#else
21170- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21171- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21172- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21173- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21174- /*
21175- * Segments used for calling PnP BIOS have byte granularity.
21176- * They code segments and data segments have fixed 64k limits,
21177- * the transfer segment sizes are set at run time.
21178- */
21179- /* 32-bit code */
21180- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21181- /* 16-bit code */
21182- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21183- /* 16-bit data */
21184- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21185- /* 16-bit data */
21186- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21187- /* 16-bit data */
21188- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21189- /*
21190- * The APM segments have byte granularity and their bases
21191- * are set at run time. All have 64k limits.
21192- */
21193- /* 32-bit code */
21194- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21195- /* 16-bit code */
21196- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21197- /* data */
21198- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21199-
21200- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21201- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21202- GDT_STACK_CANARY_INIT
21203-#endif
21204-} };
21205-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21206-
21207 static int __init x86_xsave_setup(char *s)
21208 {
21209 if (strlen(s))
21210@@ -306,6 +252,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21211 }
21212 }
21213
21214+#ifdef CONFIG_X86_64
21215+static __init int setup_disable_pcid(char *arg)
21216+{
21217+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21218+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21219+
21220+#ifdef CONFIG_PAX_MEMORY_UDEREF
21221+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21222+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21223+#endif
21224+
21225+ return 1;
21226+}
21227+__setup("nopcid", setup_disable_pcid);
21228+
21229+static void setup_pcid(struct cpuinfo_x86 *c)
21230+{
21231+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21232+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21233+
21234+#ifdef CONFIG_PAX_MEMORY_UDEREF
21235+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21236+ pax_open_kernel();
21237+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21238+ pax_close_kernel();
21239+ printk("PAX: slow and weak UDEREF enabled\n");
21240+ } else
21241+ printk("PAX: UDEREF disabled\n");
21242+#endif
21243+
21244+ return;
21245+ }
21246+
21247+ printk("PAX: PCID detected\n");
21248+ cr4_set_bits(X86_CR4_PCIDE);
21249+
21250+#ifdef CONFIG_PAX_MEMORY_UDEREF
21251+ pax_open_kernel();
21252+ clone_pgd_mask = ~(pgdval_t)0UL;
21253+ pax_close_kernel();
21254+ if (pax_user_shadow_base)
21255+ printk("PAX: weak UDEREF enabled\n");
21256+ else {
21257+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21258+ printk("PAX: strong UDEREF enabled\n");
21259+ }
21260+#endif
21261+
21262+ if (cpu_has(c, X86_FEATURE_INVPCID))
21263+ printk("PAX: INVPCID detected\n");
21264+}
21265+#endif
21266+
21267 /*
21268 * Some CPU features depend on higher CPUID levels, which may not always
21269 * be available due to CPUID level capping or broken virtualization
21270@@ -406,7 +405,7 @@ void switch_to_new_gdt(int cpu)
21271 {
21272 struct desc_ptr gdt_descr;
21273
21274- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21275+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21276 gdt_descr.size = GDT_SIZE - 1;
21277 load_gdt(&gdt_descr);
21278 /* Reload the per-cpu base */
21279@@ -897,6 +896,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21280 setup_smep(c);
21281 setup_smap(c);
21282
21283+#ifdef CONFIG_X86_32
21284+#ifdef CONFIG_PAX_PAGEEXEC
21285+ if (!(__supported_pte_mask & _PAGE_NX))
21286+ clear_cpu_cap(c, X86_FEATURE_PSE);
21287+#endif
21288+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21289+ clear_cpu_cap(c, X86_FEATURE_SEP);
21290+#endif
21291+#endif
21292+
21293+#ifdef CONFIG_X86_64
21294+ setup_pcid(c);
21295+#endif
21296+
21297 /*
21298 * The vendor-specific functions might have changed features.
21299 * Now we do "generic changes."
21300@@ -979,7 +992,7 @@ static void syscall32_cpu_init(void)
21301 void enable_sep_cpu(void)
21302 {
21303 int cpu = get_cpu();
21304- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21305+ struct tss_struct *tss = init_tss + cpu;
21306
21307 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21308 put_cpu();
21309@@ -1117,14 +1130,16 @@ static __init int setup_disablecpuid(char *arg)
21310 }
21311 __setup("clearcpuid=", setup_disablecpuid);
21312
21313+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21314+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21315+
21316 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21317- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21318+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21319 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21320
21321 #ifdef CONFIG_X86_64
21322-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21323-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21324- (unsigned long) debug_idt_table };
21325+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21326+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21327
21328 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21329 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21330@@ -1307,7 +1322,7 @@ void cpu_init(void)
21331 */
21332 load_ucode_ap();
21333
21334- t = &per_cpu(init_tss, cpu);
21335+ t = init_tss + cpu;
21336 oist = &per_cpu(orig_ist, cpu);
21337
21338 #ifdef CONFIG_NUMA
21339@@ -1339,7 +1354,6 @@ void cpu_init(void)
21340 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21341 barrier();
21342
21343- x86_configure_nx();
21344 x2apic_setup();
21345
21346 /*
21347@@ -1391,7 +1405,7 @@ void cpu_init(void)
21348 {
21349 int cpu = smp_processor_id();
21350 struct task_struct *curr = current;
21351- struct tss_struct *t = &per_cpu(init_tss, cpu);
21352+ struct tss_struct *t = init_tss + cpu;
21353 struct thread_struct *thread = &curr->thread;
21354
21355 wait_for_master_cpu(cpu);
21356diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21357index 6596433..1ad6eaf 100644
21358--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21359+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21360@@ -1024,6 +1024,22 @@ static struct attribute *default_attrs[] = {
21361 };
21362
21363 #ifdef CONFIG_AMD_NB
21364+static struct attribute *default_attrs_amd_nb[] = {
21365+ &type.attr,
21366+ &level.attr,
21367+ &coherency_line_size.attr,
21368+ &physical_line_partition.attr,
21369+ &ways_of_associativity.attr,
21370+ &number_of_sets.attr,
21371+ &size.attr,
21372+ &shared_cpu_map.attr,
21373+ &shared_cpu_list.attr,
21374+ NULL,
21375+ NULL,
21376+ NULL,
21377+ NULL
21378+};
21379+
21380 static struct attribute **amd_l3_attrs(void)
21381 {
21382 static struct attribute **attrs;
21383@@ -1034,18 +1050,7 @@ static struct attribute **amd_l3_attrs(void)
21384
21385 n = ARRAY_SIZE(default_attrs);
21386
21387- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21388- n += 2;
21389-
21390- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21391- n += 1;
21392-
21393- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21394- if (attrs == NULL)
21395- return attrs = default_attrs;
21396-
21397- for (n = 0; default_attrs[n]; n++)
21398- attrs[n] = default_attrs[n];
21399+ attrs = default_attrs_amd_nb;
21400
21401 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21402 attrs[n++] = &cache_disable_0.attr;
21403@@ -1096,6 +1101,13 @@ static struct kobj_type ktype_cache = {
21404 .default_attrs = default_attrs,
21405 };
21406
21407+#ifdef CONFIG_AMD_NB
21408+static struct kobj_type ktype_cache_amd_nb = {
21409+ .sysfs_ops = &sysfs_ops,
21410+ .default_attrs = default_attrs_amd_nb,
21411+};
21412+#endif
21413+
21414 static struct kobj_type ktype_percpu_entry = {
21415 .sysfs_ops = &sysfs_ops,
21416 };
21417@@ -1161,20 +1173,26 @@ static int cache_add_dev(struct device *dev)
21418 return retval;
21419 }
21420
21421+#ifdef CONFIG_AMD_NB
21422+ amd_l3_attrs();
21423+#endif
21424+
21425 for (i = 0; i < num_cache_leaves; i++) {
21426+ struct kobj_type *ktype;
21427+
21428 this_object = INDEX_KOBJECT_PTR(cpu, i);
21429 this_object->cpu = cpu;
21430 this_object->index = i;
21431
21432 this_leaf = CPUID4_INFO_IDX(cpu, i);
21433
21434- ktype_cache.default_attrs = default_attrs;
21435+ ktype = &ktype_cache;
21436 #ifdef CONFIG_AMD_NB
21437 if (this_leaf->base.nb)
21438- ktype_cache.default_attrs = amd_l3_attrs();
21439+ ktype = &ktype_cache_amd_nb;
21440 #endif
21441 retval = kobject_init_and_add(&(this_object->kobj),
21442- &ktype_cache,
21443+ ktype,
21444 per_cpu(ici_cache_kobject, cpu),
21445 "index%1lu", i);
21446 if (unlikely(retval)) {
21447diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21448index 3c036cb..3b5677d 100644
21449--- a/arch/x86/kernel/cpu/mcheck/mce.c
21450+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21451@@ -47,6 +47,7 @@
21452 #include <asm/tlbflush.h>
21453 #include <asm/mce.h>
21454 #include <asm/msr.h>
21455+#include <asm/local.h>
21456
21457 #include "mce-internal.h"
21458
21459@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21460 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21461 m->cs, m->ip);
21462
21463- if (m->cs == __KERNEL_CS)
21464+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21465 print_symbol("{%s}", m->ip);
21466 pr_cont("\n");
21467 }
21468@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21469
21470 #define PANIC_TIMEOUT 5 /* 5 seconds */
21471
21472-static atomic_t mce_panicked;
21473+static atomic_unchecked_t mce_panicked;
21474
21475 static int fake_panic;
21476-static atomic_t mce_fake_panicked;
21477+static atomic_unchecked_t mce_fake_panicked;
21478
21479 /* Panic in progress. Enable interrupts and wait for final IPI */
21480 static void wait_for_panic(void)
21481@@ -318,7 +319,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21482 /*
21483 * Make sure only one CPU runs in machine check panic
21484 */
21485- if (atomic_inc_return(&mce_panicked) > 1)
21486+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21487 wait_for_panic();
21488 barrier();
21489
21490@@ -326,7 +327,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21491 console_verbose();
21492 } else {
21493 /* Don't log too much for fake panic */
21494- if (atomic_inc_return(&mce_fake_panicked) > 1)
21495+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21496 return;
21497 }
21498 /* First print corrected ones that are still unlogged */
21499@@ -365,7 +366,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21500 if (!fake_panic) {
21501 if (panic_timeout == 0)
21502 panic_timeout = mca_cfg.panic_timeout;
21503- panic(msg);
21504+ panic("%s", msg);
21505 } else
21506 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21507 }
21508@@ -743,7 +744,7 @@ static int mce_timed_out(u64 *t, const char *msg)
21509 * might have been modified by someone else.
21510 */
21511 rmb();
21512- if (atomic_read(&mce_panicked))
21513+ if (atomic_read_unchecked(&mce_panicked))
21514 wait_for_panic();
21515 if (!mca_cfg.monarch_timeout)
21516 goto out;
21517@@ -1669,7 +1670,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21518 }
21519
21520 /* Call the installed machine check handler for this CPU setup. */
21521-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21522+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21523 unexpected_machine_check;
21524
21525 /*
21526@@ -1692,7 +1693,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21527 return;
21528 }
21529
21530+ pax_open_kernel();
21531 machine_check_vector = do_machine_check;
21532+ pax_close_kernel();
21533
21534 __mcheck_cpu_init_generic();
21535 __mcheck_cpu_init_vendor(c);
21536@@ -1706,7 +1709,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21537 */
21538
21539 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21540-static int mce_chrdev_open_count; /* #times opened */
21541+static local_t mce_chrdev_open_count; /* #times opened */
21542 static int mce_chrdev_open_exclu; /* already open exclusive? */
21543
21544 static int mce_chrdev_open(struct inode *inode, struct file *file)
21545@@ -1714,7 +1717,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21546 spin_lock(&mce_chrdev_state_lock);
21547
21548 if (mce_chrdev_open_exclu ||
21549- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21550+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21551 spin_unlock(&mce_chrdev_state_lock);
21552
21553 return -EBUSY;
21554@@ -1722,7 +1725,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21555
21556 if (file->f_flags & O_EXCL)
21557 mce_chrdev_open_exclu = 1;
21558- mce_chrdev_open_count++;
21559+ local_inc(&mce_chrdev_open_count);
21560
21561 spin_unlock(&mce_chrdev_state_lock);
21562
21563@@ -1733,7 +1736,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21564 {
21565 spin_lock(&mce_chrdev_state_lock);
21566
21567- mce_chrdev_open_count--;
21568+ local_dec(&mce_chrdev_open_count);
21569 mce_chrdev_open_exclu = 0;
21570
21571 spin_unlock(&mce_chrdev_state_lock);
21572@@ -2408,7 +2411,7 @@ static __init void mce_init_banks(void)
21573
21574 for (i = 0; i < mca_cfg.banks; i++) {
21575 struct mce_bank *b = &mce_banks[i];
21576- struct device_attribute *a = &b->attr;
21577+ device_attribute_no_const *a = &b->attr;
21578
21579 sysfs_attr_init(&a->attr);
21580 a->attr.name = b->attrname;
21581@@ -2515,7 +2518,7 @@ struct dentry *mce_get_debugfs_dir(void)
21582 static void mce_reset(void)
21583 {
21584 cpu_missing = 0;
21585- atomic_set(&mce_fake_panicked, 0);
21586+ atomic_set_unchecked(&mce_fake_panicked, 0);
21587 atomic_set(&mce_executing, 0);
21588 atomic_set(&mce_callin, 0);
21589 atomic_set(&global_nwo, 0);
21590diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21591index 737b0ad..09ec66e 100644
21592--- a/arch/x86/kernel/cpu/mcheck/p5.c
21593+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21594@@ -12,6 +12,7 @@
21595 #include <asm/tlbflush.h>
21596 #include <asm/mce.h>
21597 #include <asm/msr.h>
21598+#include <asm/pgtable.h>
21599
21600 /* By default disabled */
21601 int mce_p5_enabled __read_mostly;
21602@@ -55,7 +56,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21603 if (!cpu_has(c, X86_FEATURE_MCE))
21604 return;
21605
21606+ pax_open_kernel();
21607 machine_check_vector = pentium_machine_check;
21608+ pax_close_kernel();
21609 /* Make sure the vector pointer is visible before we enable MCEs: */
21610 wmb();
21611
21612diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21613index 44f1382..315b292 100644
21614--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21615+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21616@@ -11,6 +11,7 @@
21617 #include <asm/tlbflush.h>
21618 #include <asm/mce.h>
21619 #include <asm/msr.h>
21620+#include <asm/pgtable.h>
21621
21622 /* Machine check handler for WinChip C6: */
21623 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21624@@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21625 {
21626 u32 lo, hi;
21627
21628+ pax_open_kernel();
21629 machine_check_vector = winchip_machine_check;
21630+ pax_close_kernel();
21631 /* Make sure the vector pointer is visible before we enable MCEs: */
21632 wmb();
21633
21634diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21635index 36a8361..e7058c2 100644
21636--- a/arch/x86/kernel/cpu/microcode/core.c
21637+++ b/arch/x86/kernel/cpu/microcode/core.c
21638@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21639 return NOTIFY_OK;
21640 }
21641
21642-static struct notifier_block __refdata mc_cpu_notifier = {
21643+static struct notifier_block mc_cpu_notifier = {
21644 .notifier_call = mc_cpu_callback,
21645 };
21646
21647diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21648index 746e7fd..8dc677e 100644
21649--- a/arch/x86/kernel/cpu/microcode/intel.c
21650+++ b/arch/x86/kernel/cpu/microcode/intel.c
21651@@ -298,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21652
21653 static int get_ucode_user(void *to, const void *from, size_t n)
21654 {
21655- return copy_from_user(to, from, n);
21656+ return copy_from_user(to, (const void __force_user *)from, n);
21657 }
21658
21659 static enum ucode_state
21660 request_microcode_user(int cpu, const void __user *buf, size_t size)
21661 {
21662- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21663+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21664 }
21665
21666 static void microcode_fini_cpu(int cpu)
21667diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21668index ea5f363..cb0e905 100644
21669--- a/arch/x86/kernel/cpu/mtrr/main.c
21670+++ b/arch/x86/kernel/cpu/mtrr/main.c
21671@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21672 u64 size_or_mask, size_and_mask;
21673 static bool mtrr_aps_delayed_init;
21674
21675-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21676+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21677
21678 const struct mtrr_ops *mtrr_if;
21679
21680diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21681index df5e41f..816c719 100644
21682--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21683+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21684@@ -25,7 +25,7 @@ struct mtrr_ops {
21685 int (*validate_add_page)(unsigned long base, unsigned long size,
21686 unsigned int type);
21687 int (*have_wrcomb)(void);
21688-};
21689+} __do_const;
21690
21691 extern int generic_get_free_region(unsigned long base, unsigned long size,
21692 int replace_reg);
21693diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21694index b71a7f8..534af0e 100644
21695--- a/arch/x86/kernel/cpu/perf_event.c
21696+++ b/arch/x86/kernel/cpu/perf_event.c
21697@@ -1376,7 +1376,7 @@ static void __init pmu_check_apic(void)
21698
21699 }
21700
21701-static struct attribute_group x86_pmu_format_group = {
21702+static attribute_group_no_const x86_pmu_format_group = {
21703 .name = "format",
21704 .attrs = NULL,
21705 };
21706@@ -1475,7 +1475,7 @@ static struct attribute *events_attr[] = {
21707 NULL,
21708 };
21709
21710-static struct attribute_group x86_pmu_events_group = {
21711+static attribute_group_no_const x86_pmu_events_group = {
21712 .name = "events",
21713 .attrs = events_attr,
21714 };
21715@@ -2037,7 +2037,7 @@ static unsigned long get_segment_base(unsigned int segment)
21716 if (idx > GDT_ENTRIES)
21717 return 0;
21718
21719- desc = raw_cpu_ptr(gdt_page.gdt);
21720+ desc = get_cpu_gdt_table(smp_processor_id());
21721 }
21722
21723 return get_desc_base(desc + idx);
21724@@ -2127,7 +2127,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21725 break;
21726
21727 perf_callchain_store(entry, frame.return_address);
21728- fp = frame.next_frame;
21729+ fp = (const void __force_user *)frame.next_frame;
21730 }
21731 }
21732
21733diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21734index 97242a9..cf9c30e 100644
21735--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21736+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21737@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21738 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21739 {
21740 struct attribute **attrs;
21741- struct attribute_group *attr_group;
21742+ attribute_group_no_const *attr_group;
21743 int i = 0, j;
21744
21745 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21746diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21747index 2589906..1ca1000 100644
21748--- a/arch/x86/kernel/cpu/perf_event_intel.c
21749+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21750@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21751 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21752
21753 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21754- u64 capabilities;
21755+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21756
21757- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21758- x86_pmu.intel_cap.capabilities = capabilities;
21759+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21760+ x86_pmu.intel_cap.capabilities = capabilities;
21761 }
21762
21763 intel_ds_init();
21764diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21765index c4bb8b8..9f7384d 100644
21766--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21767+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21768@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21769 NULL,
21770 };
21771
21772-static struct attribute_group rapl_pmu_events_group = {
21773+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21774 .name = "events",
21775 .attrs = NULL, /* patched at runtime */
21776 };
21777diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21778index c635b8b..b78835e 100644
21779--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21780+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21781@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21782 static int __init uncore_type_init(struct intel_uncore_type *type)
21783 {
21784 struct intel_uncore_pmu *pmus;
21785- struct attribute_group *attr_group;
21786+ attribute_group_no_const *attr_group;
21787 struct attribute **attrs;
21788 int i, j;
21789
21790diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21791index 6c8c1e7..515b98a 100644
21792--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21793+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21794@@ -114,7 +114,7 @@ struct intel_uncore_box {
21795 struct uncore_event_desc {
21796 struct kobj_attribute attr;
21797 const char *config;
21798-};
21799+} __do_const;
21800
21801 ssize_t uncore_event_show(struct kobject *kobj,
21802 struct kobj_attribute *attr, char *buf);
21803diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21804index 83741a7..bd3507d 100644
21805--- a/arch/x86/kernel/cpuid.c
21806+++ b/arch/x86/kernel/cpuid.c
21807@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21808 return notifier_from_errno(err);
21809 }
21810
21811-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21812+static struct notifier_block cpuid_class_cpu_notifier =
21813 {
21814 .notifier_call = cpuid_class_cpu_callback,
21815 };
21816diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21817index aceb2f9..c76d3e3 100644
21818--- a/arch/x86/kernel/crash.c
21819+++ b/arch/x86/kernel/crash.c
21820@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21821 #ifdef CONFIG_X86_32
21822 struct pt_regs fixed_regs;
21823
21824- if (!user_mode_vm(regs)) {
21825+ if (!user_mode(regs)) {
21826 crash_fixup_ss_esp(&fixed_regs, regs);
21827 regs = &fixed_regs;
21828 }
21829diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21830index afa64ad..dce67dd 100644
21831--- a/arch/x86/kernel/crash_dump_64.c
21832+++ b/arch/x86/kernel/crash_dump_64.c
21833@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21834 return -ENOMEM;
21835
21836 if (userbuf) {
21837- if (copy_to_user(buf, vaddr + offset, csize)) {
21838+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21839 iounmap(vaddr);
21840 return -EFAULT;
21841 }
21842diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21843index f6dfd93..892ade4 100644
21844--- a/arch/x86/kernel/doublefault.c
21845+++ b/arch/x86/kernel/doublefault.c
21846@@ -12,7 +12,7 @@
21847
21848 #define DOUBLEFAULT_STACKSIZE (1024)
21849 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21850-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21851+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21852
21853 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21854
21855@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21856 unsigned long gdt, tss;
21857
21858 native_store_gdt(&gdt_desc);
21859- gdt = gdt_desc.address;
21860+ gdt = (unsigned long)gdt_desc.address;
21861
21862 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21863
21864@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21865 /* 0x2 bit is always set */
21866 .flags = X86_EFLAGS_SF | 0x2,
21867 .sp = STACK_START,
21868- .es = __USER_DS,
21869+ .es = __KERNEL_DS,
21870 .cs = __KERNEL_CS,
21871 .ss = __KERNEL_DS,
21872- .ds = __USER_DS,
21873+ .ds = __KERNEL_DS,
21874 .fs = __KERNEL_PERCPU,
21875
21876 .__cr3 = __pa_nodebug(swapper_pg_dir),
21877diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21878index cf3df1d..b637d9a 100644
21879--- a/arch/x86/kernel/dumpstack.c
21880+++ b/arch/x86/kernel/dumpstack.c
21881@@ -2,6 +2,9 @@
21882 * Copyright (C) 1991, 1992 Linus Torvalds
21883 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21884 */
21885+#ifdef CONFIG_GRKERNSEC_HIDESYM
21886+#define __INCLUDED_BY_HIDESYM 1
21887+#endif
21888 #include <linux/kallsyms.h>
21889 #include <linux/kprobes.h>
21890 #include <linux/uaccess.h>
21891@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21892
21893 void printk_address(unsigned long address)
21894 {
21895- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21896+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21897 }
21898
21899 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21900 static void
21901 print_ftrace_graph_addr(unsigned long addr, void *data,
21902 const struct stacktrace_ops *ops,
21903- struct thread_info *tinfo, int *graph)
21904+ struct task_struct *task, int *graph)
21905 {
21906- struct task_struct *task;
21907 unsigned long ret_addr;
21908 int index;
21909
21910 if (addr != (unsigned long)return_to_handler)
21911 return;
21912
21913- task = tinfo->task;
21914 index = task->curr_ret_stack;
21915
21916 if (!task->ret_stack || index < *graph)
21917@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21918 static inline void
21919 print_ftrace_graph_addr(unsigned long addr, void *data,
21920 const struct stacktrace_ops *ops,
21921- struct thread_info *tinfo, int *graph)
21922+ struct task_struct *task, int *graph)
21923 { }
21924 #endif
21925
21926@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21927 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21928 */
21929
21930-static inline int valid_stack_ptr(struct thread_info *tinfo,
21931- void *p, unsigned int size, void *end)
21932+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21933 {
21934- void *t = tinfo;
21935 if (end) {
21936 if (p < end && p >= (end-THREAD_SIZE))
21937 return 1;
21938@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21939 }
21940
21941 unsigned long
21942-print_context_stack(struct thread_info *tinfo,
21943+print_context_stack(struct task_struct *task, void *stack_start,
21944 unsigned long *stack, unsigned long bp,
21945 const struct stacktrace_ops *ops, void *data,
21946 unsigned long *end, int *graph)
21947 {
21948 struct stack_frame *frame = (struct stack_frame *)bp;
21949
21950- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21951+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21952 unsigned long addr;
21953
21954 addr = *stack;
21955@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21956 } else {
21957 ops->address(data, addr, 0);
21958 }
21959- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21960+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21961 }
21962 stack++;
21963 }
21964@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21965 EXPORT_SYMBOL_GPL(print_context_stack);
21966
21967 unsigned long
21968-print_context_stack_bp(struct thread_info *tinfo,
21969+print_context_stack_bp(struct task_struct *task, void *stack_start,
21970 unsigned long *stack, unsigned long bp,
21971 const struct stacktrace_ops *ops, void *data,
21972 unsigned long *end, int *graph)
21973@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21974 struct stack_frame *frame = (struct stack_frame *)bp;
21975 unsigned long *ret_addr = &frame->return_address;
21976
21977- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21978+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21979 unsigned long addr = *ret_addr;
21980
21981 if (!__kernel_text_address(addr))
21982@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21983 ops->address(data, addr, 1);
21984 frame = frame->next_frame;
21985 ret_addr = &frame->return_address;
21986- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21987+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21988 }
21989
21990 return (unsigned long)frame;
21991@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21992 static void print_trace_address(void *data, unsigned long addr, int reliable)
21993 {
21994 touch_nmi_watchdog();
21995- printk(data);
21996+ printk("%s", (char *)data);
21997 printk_stack_address(addr, reliable);
21998 }
21999
22000@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22001 EXPORT_SYMBOL_GPL(oops_begin);
22002 NOKPROBE_SYMBOL(oops_begin);
22003
22004+extern void gr_handle_kernel_exploit(void);
22005+
22006 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22007 {
22008 if (regs && kexec_should_crash(current))
22009@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22010 panic("Fatal exception in interrupt");
22011 if (panic_on_oops)
22012 panic("Fatal exception");
22013- do_exit(signr);
22014+
22015+ gr_handle_kernel_exploit();
22016+
22017+ do_group_exit(signr);
22018 }
22019 NOKPROBE_SYMBOL(oops_end);
22020
22021@@ -278,7 +282,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22022 print_modules();
22023 show_regs(regs);
22024 #ifdef CONFIG_X86_32
22025- if (user_mode_vm(regs)) {
22026+ if (user_mode(regs)) {
22027 sp = regs->sp;
22028 ss = regs->ss & 0xffff;
22029 } else {
22030@@ -307,7 +311,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22031 unsigned long flags = oops_begin();
22032 int sig = SIGSEGV;
22033
22034- if (!user_mode_vm(regs))
22035+ if (!user_mode(regs))
22036 report_bug(regs->ip, regs);
22037
22038 if (__die(str, regs, err))
22039diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22040index 5abd4cd..c65733b 100644
22041--- a/arch/x86/kernel/dumpstack_32.c
22042+++ b/arch/x86/kernel/dumpstack_32.c
22043@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22044 bp = stack_frame(task, regs);
22045
22046 for (;;) {
22047- struct thread_info *context;
22048+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22049 void *end_stack;
22050
22051 end_stack = is_hardirq_stack(stack, cpu);
22052 if (!end_stack)
22053 end_stack = is_softirq_stack(stack, cpu);
22054
22055- context = task_thread_info(task);
22056- bp = ops->walk_stack(context, stack, bp, ops, data,
22057+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22058 end_stack, &graph);
22059
22060 /* Stop if not on irq stack */
22061@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22062 int i;
22063
22064 show_regs_print_info(KERN_EMERG);
22065- __show_regs(regs, !user_mode_vm(regs));
22066+ __show_regs(regs, !user_mode(regs));
22067
22068 /*
22069 * When in-kernel, we also print out the stack and code at the
22070 * time of the fault..
22071 */
22072- if (!user_mode_vm(regs)) {
22073+ if (!user_mode(regs)) {
22074 unsigned int code_prologue = code_bytes * 43 / 64;
22075 unsigned int code_len = code_bytes;
22076 unsigned char c;
22077 u8 *ip;
22078+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22079
22080 pr_emerg("Stack:\n");
22081 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22082
22083 pr_emerg("Code:");
22084
22085- ip = (u8 *)regs->ip - code_prologue;
22086+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22087 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22088 /* try starting at IP */
22089- ip = (u8 *)regs->ip;
22090+ ip = (u8 *)regs->ip + cs_base;
22091 code_len = code_len - code_prologue + 1;
22092 }
22093 for (i = 0; i < code_len; i++, ip++) {
22094@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22095 pr_cont(" Bad EIP value.");
22096 break;
22097 }
22098- if (ip == (u8 *)regs->ip)
22099+ if (ip == (u8 *)regs->ip + cs_base)
22100 pr_cont(" <%02x>", c);
22101 else
22102 pr_cont(" %02x", c);
22103@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22104 {
22105 unsigned short ud2;
22106
22107+ ip = ktla_ktva(ip);
22108 if (ip < PAGE_OFFSET)
22109 return 0;
22110 if (probe_kernel_address((unsigned short *)ip, ud2))
22111@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22112
22113 return ud2 == 0x0b0f;
22114 }
22115+
22116+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22117+void pax_check_alloca(unsigned long size)
22118+{
22119+ unsigned long sp = (unsigned long)&sp, stack_left;
22120+
22121+ /* all kernel stacks are of the same size */
22122+ stack_left = sp & (THREAD_SIZE - 1);
22123+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22124+}
22125+EXPORT_SYMBOL(pax_check_alloca);
22126+#endif
22127diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22128index ff86f19..73eabf4 100644
22129--- a/arch/x86/kernel/dumpstack_64.c
22130+++ b/arch/x86/kernel/dumpstack_64.c
22131@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22132 const struct stacktrace_ops *ops, void *data)
22133 {
22134 const unsigned cpu = get_cpu();
22135- struct thread_info *tinfo;
22136 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22137 unsigned long dummy;
22138 unsigned used = 0;
22139 int graph = 0;
22140 int done = 0;
22141+ void *stack_start;
22142
22143 if (!task)
22144 task = current;
22145@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22146 * current stack address. If the stacks consist of nested
22147 * exceptions
22148 */
22149- tinfo = task_thread_info(task);
22150 while (!done) {
22151 unsigned long *stack_end;
22152 enum stack_type stype;
22153@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22154 if (ops->stack(data, id) < 0)
22155 break;
22156
22157- bp = ops->walk_stack(tinfo, stack, bp, ops,
22158+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22159 data, stack_end, &graph);
22160 ops->stack(data, "<EOE>");
22161 /*
22162@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22163 * second-to-last pointer (index -2 to end) in the
22164 * exception stack:
22165 */
22166+ if ((u16)stack_end[-1] != __KERNEL_DS)
22167+ goto out;
22168 stack = (unsigned long *) stack_end[-2];
22169 done = 0;
22170 break;
22171@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22172
22173 if (ops->stack(data, "IRQ") < 0)
22174 break;
22175- bp = ops->walk_stack(tinfo, stack, bp,
22176+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22177 ops, data, stack_end, &graph);
22178 /*
22179 * We link to the next stack (which would be
22180@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22181 /*
22182 * This handles the process stack:
22183 */
22184- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22185+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22186+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22187+out:
22188 put_cpu();
22189 }
22190 EXPORT_SYMBOL(dump_trace);
22191@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22192 {
22193 unsigned short ud2;
22194
22195- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22196+ if (probe_kernel_address((unsigned short *)ip, ud2))
22197 return 0;
22198
22199 return ud2 == 0x0b0f;
22200 }
22201+
22202+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22203+void pax_check_alloca(unsigned long size)
22204+{
22205+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22206+ unsigned cpu, used;
22207+ char *id;
22208+
22209+ /* check the process stack first */
22210+ stack_start = (unsigned long)task_stack_page(current);
22211+ stack_end = stack_start + THREAD_SIZE;
22212+ if (likely(stack_start <= sp && sp < stack_end)) {
22213+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22214+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22215+ return;
22216+ }
22217+
22218+ cpu = get_cpu();
22219+
22220+ /* check the irq stacks */
22221+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22222+ stack_start = stack_end - IRQ_STACK_SIZE;
22223+ if (stack_start <= sp && sp < stack_end) {
22224+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22225+ put_cpu();
22226+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22227+ return;
22228+ }
22229+
22230+ /* check the exception stacks */
22231+ used = 0;
22232+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22233+ stack_start = stack_end - EXCEPTION_STKSZ;
22234+ if (stack_end && stack_start <= sp && sp < stack_end) {
22235+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22236+ put_cpu();
22237+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22238+ return;
22239+ }
22240+
22241+ put_cpu();
22242+
22243+ /* unknown stack */
22244+ BUG();
22245+}
22246+EXPORT_SYMBOL(pax_check_alloca);
22247+#endif
22248diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22249index 46201de..ebffabf 100644
22250--- a/arch/x86/kernel/e820.c
22251+++ b/arch/x86/kernel/e820.c
22252@@ -794,8 +794,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22253
22254 static void early_panic(char *msg)
22255 {
22256- early_printk(msg);
22257- panic(msg);
22258+ early_printk("%s", msg);
22259+ panic("%s", msg);
22260 }
22261
22262 static int userdef __initdata;
22263diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22264index a62536a..8444df4 100644
22265--- a/arch/x86/kernel/early_printk.c
22266+++ b/arch/x86/kernel/early_printk.c
22267@@ -7,6 +7,7 @@
22268 #include <linux/pci_regs.h>
22269 #include <linux/pci_ids.h>
22270 #include <linux/errno.h>
22271+#include <linux/sched.h>
22272 #include <asm/io.h>
22273 #include <asm/processor.h>
22274 #include <asm/fcntl.h>
22275diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22276index 31e2d5b..b31c76d 100644
22277--- a/arch/x86/kernel/entry_32.S
22278+++ b/arch/x86/kernel/entry_32.S
22279@@ -177,13 +177,154 @@
22280 /*CFI_REL_OFFSET gs, PT_GS*/
22281 .endm
22282 .macro SET_KERNEL_GS reg
22283+
22284+#ifdef CONFIG_CC_STACKPROTECTOR
22285 movl $(__KERNEL_STACK_CANARY), \reg
22286+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22287+ movl $(__USER_DS), \reg
22288+#else
22289+ xorl \reg, \reg
22290+#endif
22291+
22292 movl \reg, %gs
22293 .endm
22294
22295 #endif /* CONFIG_X86_32_LAZY_GS */
22296
22297-.macro SAVE_ALL
22298+.macro pax_enter_kernel
22299+#ifdef CONFIG_PAX_KERNEXEC
22300+ call pax_enter_kernel
22301+#endif
22302+.endm
22303+
22304+.macro pax_exit_kernel
22305+#ifdef CONFIG_PAX_KERNEXEC
22306+ call pax_exit_kernel
22307+#endif
22308+.endm
22309+
22310+#ifdef CONFIG_PAX_KERNEXEC
22311+ENTRY(pax_enter_kernel)
22312+#ifdef CONFIG_PARAVIRT
22313+ pushl %eax
22314+ pushl %ecx
22315+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22316+ mov %eax, %esi
22317+#else
22318+ mov %cr0, %esi
22319+#endif
22320+ bts $16, %esi
22321+ jnc 1f
22322+ mov %cs, %esi
22323+ cmp $__KERNEL_CS, %esi
22324+ jz 3f
22325+ ljmp $__KERNEL_CS, $3f
22326+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22327+2:
22328+#ifdef CONFIG_PARAVIRT
22329+ mov %esi, %eax
22330+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22331+#else
22332+ mov %esi, %cr0
22333+#endif
22334+3:
22335+#ifdef CONFIG_PARAVIRT
22336+ popl %ecx
22337+ popl %eax
22338+#endif
22339+ ret
22340+ENDPROC(pax_enter_kernel)
22341+
22342+ENTRY(pax_exit_kernel)
22343+#ifdef CONFIG_PARAVIRT
22344+ pushl %eax
22345+ pushl %ecx
22346+#endif
22347+ mov %cs, %esi
22348+ cmp $__KERNEXEC_KERNEL_CS, %esi
22349+ jnz 2f
22350+#ifdef CONFIG_PARAVIRT
22351+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22352+ mov %eax, %esi
22353+#else
22354+ mov %cr0, %esi
22355+#endif
22356+ btr $16, %esi
22357+ ljmp $__KERNEL_CS, $1f
22358+1:
22359+#ifdef CONFIG_PARAVIRT
22360+ mov %esi, %eax
22361+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22362+#else
22363+ mov %esi, %cr0
22364+#endif
22365+2:
22366+#ifdef CONFIG_PARAVIRT
22367+ popl %ecx
22368+ popl %eax
22369+#endif
22370+ ret
22371+ENDPROC(pax_exit_kernel)
22372+#endif
22373+
22374+ .macro pax_erase_kstack
22375+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22376+ call pax_erase_kstack
22377+#endif
22378+ .endm
22379+
22380+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22381+/*
22382+ * ebp: thread_info
22383+ */
22384+ENTRY(pax_erase_kstack)
22385+ pushl %edi
22386+ pushl %ecx
22387+ pushl %eax
22388+
22389+ mov TI_lowest_stack(%ebp), %edi
22390+ mov $-0xBEEF, %eax
22391+ std
22392+
22393+1: mov %edi, %ecx
22394+ and $THREAD_SIZE_asm - 1, %ecx
22395+ shr $2, %ecx
22396+ repne scasl
22397+ jecxz 2f
22398+
22399+ cmp $2*16, %ecx
22400+ jc 2f
22401+
22402+ mov $2*16, %ecx
22403+ repe scasl
22404+ jecxz 2f
22405+ jne 1b
22406+
22407+2: cld
22408+ or $2*4, %edi
22409+ mov %esp, %ecx
22410+ sub %edi, %ecx
22411+
22412+ cmp $THREAD_SIZE_asm, %ecx
22413+ jb 3f
22414+ ud2
22415+3:
22416+
22417+ shr $2, %ecx
22418+ rep stosl
22419+
22420+ mov TI_task_thread_sp0(%ebp), %edi
22421+ sub $128, %edi
22422+ mov %edi, TI_lowest_stack(%ebp)
22423+
22424+ popl %eax
22425+ popl %ecx
22426+ popl %edi
22427+ ret
22428+ENDPROC(pax_erase_kstack)
22429+#endif
22430+
22431+.macro __SAVE_ALL _DS
22432 cld
22433 PUSH_GS
22434 pushl_cfi %fs
22435@@ -206,7 +347,7 @@
22436 CFI_REL_OFFSET ecx, 0
22437 pushl_cfi %ebx
22438 CFI_REL_OFFSET ebx, 0
22439- movl $(__USER_DS), %edx
22440+ movl $\_DS, %edx
22441 movl %edx, %ds
22442 movl %edx, %es
22443 movl $(__KERNEL_PERCPU), %edx
22444@@ -214,6 +355,15 @@
22445 SET_KERNEL_GS %edx
22446 .endm
22447
22448+.macro SAVE_ALL
22449+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22450+ __SAVE_ALL __KERNEL_DS
22451+ pax_enter_kernel
22452+#else
22453+ __SAVE_ALL __USER_DS
22454+#endif
22455+.endm
22456+
22457 .macro RESTORE_INT_REGS
22458 popl_cfi %ebx
22459 CFI_RESTORE ebx
22460@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22461 popfl_cfi
22462 jmp syscall_exit
22463 CFI_ENDPROC
22464-END(ret_from_fork)
22465+ENDPROC(ret_from_fork)
22466
22467 ENTRY(ret_from_kernel_thread)
22468 CFI_STARTPROC
22469@@ -340,7 +490,15 @@ ret_from_intr:
22470 andl $SEGMENT_RPL_MASK, %eax
22471 #endif
22472 cmpl $USER_RPL, %eax
22473+
22474+#ifdef CONFIG_PAX_KERNEXEC
22475+ jae resume_userspace
22476+
22477+ pax_exit_kernel
22478+ jmp resume_kernel
22479+#else
22480 jb resume_kernel # not returning to v8086 or userspace
22481+#endif
22482
22483 ENTRY(resume_userspace)
22484 LOCKDEP_SYS_EXIT
22485@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22486 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22487 # int/exception return?
22488 jne work_pending
22489- jmp restore_all
22490-END(ret_from_exception)
22491+ jmp restore_all_pax
22492+ENDPROC(ret_from_exception)
22493
22494 #ifdef CONFIG_PREEMPT
22495 ENTRY(resume_kernel)
22496@@ -365,7 +523,7 @@ need_resched:
22497 jz restore_all
22498 call preempt_schedule_irq
22499 jmp need_resched
22500-END(resume_kernel)
22501+ENDPROC(resume_kernel)
22502 #endif
22503 CFI_ENDPROC
22504
22505@@ -395,30 +553,45 @@ sysenter_past_esp:
22506 /*CFI_REL_OFFSET cs, 0*/
22507 /*
22508 * Push current_thread_info()->sysenter_return to the stack.
22509- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22510- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22511 */
22512- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22513+ pushl_cfi $0
22514 CFI_REL_OFFSET eip, 0
22515
22516 pushl_cfi %eax
22517 SAVE_ALL
22518+ GET_THREAD_INFO(%ebp)
22519+ movl TI_sysenter_return(%ebp),%ebp
22520+ movl %ebp,PT_EIP(%esp)
22521 ENABLE_INTERRUPTS(CLBR_NONE)
22522
22523 /*
22524 * Load the potential sixth argument from user stack.
22525 * Careful about security.
22526 */
22527+ movl PT_OLDESP(%esp),%ebp
22528+
22529+#ifdef CONFIG_PAX_MEMORY_UDEREF
22530+ mov PT_OLDSS(%esp),%ds
22531+1: movl %ds:(%ebp),%ebp
22532+ push %ss
22533+ pop %ds
22534+#else
22535 cmpl $__PAGE_OFFSET-3,%ebp
22536 jae syscall_fault
22537 ASM_STAC
22538 1: movl (%ebp),%ebp
22539 ASM_CLAC
22540+#endif
22541+
22542 movl %ebp,PT_EBP(%esp)
22543 _ASM_EXTABLE(1b,syscall_fault)
22544
22545 GET_THREAD_INFO(%ebp)
22546
22547+#ifdef CONFIG_PAX_RANDKSTACK
22548+ pax_erase_kstack
22549+#endif
22550+
22551 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22552 jnz sysenter_audit
22553 sysenter_do_call:
22554@@ -434,12 +607,24 @@ sysenter_after_call:
22555 testl $_TIF_ALLWORK_MASK, %ecx
22556 jne sysexit_audit
22557 sysenter_exit:
22558+
22559+#ifdef CONFIG_PAX_RANDKSTACK
22560+ pushl_cfi %eax
22561+ movl %esp, %eax
22562+ call pax_randomize_kstack
22563+ popl_cfi %eax
22564+#endif
22565+
22566+ pax_erase_kstack
22567+
22568 /* if something modifies registers it must also disable sysexit */
22569 movl PT_EIP(%esp), %edx
22570 movl PT_OLDESP(%esp), %ecx
22571 xorl %ebp,%ebp
22572 TRACE_IRQS_ON
22573 1: mov PT_FS(%esp), %fs
22574+2: mov PT_DS(%esp), %ds
22575+3: mov PT_ES(%esp), %es
22576 PTGS_TO_GS
22577 ENABLE_INTERRUPTS_SYSEXIT
22578
22579@@ -453,6 +638,9 @@ sysenter_audit:
22580 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22581 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22582 call __audit_syscall_entry
22583+
22584+ pax_erase_kstack
22585+
22586 popl_cfi %ecx /* get that remapped edx off the stack */
22587 popl_cfi %ecx /* get that remapped esi off the stack */
22588 movl PT_EAX(%esp),%eax /* reload syscall number */
22589@@ -479,10 +667,16 @@ sysexit_audit:
22590
22591 CFI_ENDPROC
22592 .pushsection .fixup,"ax"
22593-2: movl $0,PT_FS(%esp)
22594+4: movl $0,PT_FS(%esp)
22595+ jmp 1b
22596+5: movl $0,PT_DS(%esp)
22597+ jmp 1b
22598+6: movl $0,PT_ES(%esp)
22599 jmp 1b
22600 .popsection
22601- _ASM_EXTABLE(1b,2b)
22602+ _ASM_EXTABLE(1b,4b)
22603+ _ASM_EXTABLE(2b,5b)
22604+ _ASM_EXTABLE(3b,6b)
22605 PTGS_TO_GS_EX
22606 ENDPROC(ia32_sysenter_target)
22607
22608@@ -493,6 +687,11 @@ ENTRY(system_call)
22609 pushl_cfi %eax # save orig_eax
22610 SAVE_ALL
22611 GET_THREAD_INFO(%ebp)
22612+
22613+#ifdef CONFIG_PAX_RANDKSTACK
22614+ pax_erase_kstack
22615+#endif
22616+
22617 # system call tracing in operation / emulation
22618 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22619 jnz syscall_trace_entry
22620@@ -512,6 +711,15 @@ syscall_exit:
22621 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22622 jne syscall_exit_work
22623
22624+restore_all_pax:
22625+
22626+#ifdef CONFIG_PAX_RANDKSTACK
22627+ movl %esp, %eax
22628+ call pax_randomize_kstack
22629+#endif
22630+
22631+ pax_erase_kstack
22632+
22633 restore_all:
22634 TRACE_IRQS_IRET
22635 restore_all_notrace:
22636@@ -566,14 +774,34 @@ ldt_ss:
22637 * compensating for the offset by changing to the ESPFIX segment with
22638 * a base address that matches for the difference.
22639 */
22640-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22641+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22642 mov %esp, %edx /* load kernel esp */
22643 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22644 mov %dx, %ax /* eax: new kernel esp */
22645 sub %eax, %edx /* offset (low word is 0) */
22646+#ifdef CONFIG_SMP
22647+ movl PER_CPU_VAR(cpu_number), %ebx
22648+ shll $PAGE_SHIFT_asm, %ebx
22649+ addl $cpu_gdt_table, %ebx
22650+#else
22651+ movl $cpu_gdt_table, %ebx
22652+#endif
22653 shr $16, %edx
22654- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22655- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22656+
22657+#ifdef CONFIG_PAX_KERNEXEC
22658+ mov %cr0, %esi
22659+ btr $16, %esi
22660+ mov %esi, %cr0
22661+#endif
22662+
22663+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22664+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22665+
22666+#ifdef CONFIG_PAX_KERNEXEC
22667+ bts $16, %esi
22668+ mov %esi, %cr0
22669+#endif
22670+
22671 pushl_cfi $__ESPFIX_SS
22672 pushl_cfi %eax /* new kernel esp */
22673 /* Disable interrupts, but do not irqtrace this section: we
22674@@ -603,20 +831,18 @@ work_resched:
22675 movl TI_flags(%ebp), %ecx
22676 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22677 # than syscall tracing?
22678- jz restore_all
22679+ jz restore_all_pax
22680 testb $_TIF_NEED_RESCHED, %cl
22681 jnz work_resched
22682
22683 work_notifysig: # deal with pending signals and
22684 # notify-resume requests
22685+ movl %esp, %eax
22686 #ifdef CONFIG_VM86
22687 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22688- movl %esp, %eax
22689 jne work_notifysig_v86 # returning to kernel-space or
22690 # vm86-space
22691 1:
22692-#else
22693- movl %esp, %eax
22694 #endif
22695 TRACE_IRQS_ON
22696 ENABLE_INTERRUPTS(CLBR_NONE)
22697@@ -637,7 +863,7 @@ work_notifysig_v86:
22698 movl %eax, %esp
22699 jmp 1b
22700 #endif
22701-END(work_pending)
22702+ENDPROC(work_pending)
22703
22704 # perform syscall exit tracing
22705 ALIGN
22706@@ -645,11 +871,14 @@ syscall_trace_entry:
22707 movl $-ENOSYS,PT_EAX(%esp)
22708 movl %esp, %eax
22709 call syscall_trace_enter
22710+
22711+ pax_erase_kstack
22712+
22713 /* What it returned is what we'll actually use. */
22714 cmpl $(NR_syscalls), %eax
22715 jnae syscall_call
22716 jmp syscall_exit
22717-END(syscall_trace_entry)
22718+ENDPROC(syscall_trace_entry)
22719
22720 # perform syscall exit tracing
22721 ALIGN
22722@@ -662,26 +891,30 @@ syscall_exit_work:
22723 movl %esp, %eax
22724 call syscall_trace_leave
22725 jmp resume_userspace
22726-END(syscall_exit_work)
22727+ENDPROC(syscall_exit_work)
22728 CFI_ENDPROC
22729
22730 RING0_INT_FRAME # can't unwind into user space anyway
22731 syscall_fault:
22732+#ifdef CONFIG_PAX_MEMORY_UDEREF
22733+ push %ss
22734+ pop %ds
22735+#endif
22736 ASM_CLAC
22737 GET_THREAD_INFO(%ebp)
22738 movl $-EFAULT,PT_EAX(%esp)
22739 jmp resume_userspace
22740-END(syscall_fault)
22741+ENDPROC(syscall_fault)
22742
22743 syscall_badsys:
22744 movl $-ENOSYS,%eax
22745 jmp syscall_after_call
22746-END(syscall_badsys)
22747+ENDPROC(syscall_badsys)
22748
22749 sysenter_badsys:
22750 movl $-ENOSYS,%eax
22751 jmp sysenter_after_call
22752-END(sysenter_badsys)
22753+ENDPROC(sysenter_badsys)
22754 CFI_ENDPROC
22755
22756 .macro FIXUP_ESPFIX_STACK
22757@@ -694,8 +927,15 @@ END(sysenter_badsys)
22758 */
22759 #ifdef CONFIG_X86_ESPFIX32
22760 /* fixup the stack */
22761- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22762- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22763+#ifdef CONFIG_SMP
22764+ movl PER_CPU_VAR(cpu_number), %ebx
22765+ shll $PAGE_SHIFT_asm, %ebx
22766+ addl $cpu_gdt_table, %ebx
22767+#else
22768+ movl $cpu_gdt_table, %ebx
22769+#endif
22770+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22771+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22772 shl $16, %eax
22773 addl %esp, %eax /* the adjusted stack pointer */
22774 pushl_cfi $__KERNEL_DS
22775@@ -751,7 +991,7 @@ vector=vector+1
22776 .endr
22777 2: jmp common_interrupt
22778 .endr
22779-END(irq_entries_start)
22780+ENDPROC(irq_entries_start)
22781
22782 .previous
22783 END(interrupt)
22784@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22785 pushl_cfi $do_coprocessor_error
22786 jmp error_code
22787 CFI_ENDPROC
22788-END(coprocessor_error)
22789+ENDPROC(coprocessor_error)
22790
22791 ENTRY(simd_coprocessor_error)
22792 RING0_INT_FRAME
22793@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22794 .section .altinstructions,"a"
22795 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22796 .previous
22797-.section .altinstr_replacement,"ax"
22798+.section .altinstr_replacement,"a"
22799 663: pushl $do_simd_coprocessor_error
22800 664:
22801 .previous
22802@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22803 #endif
22804 jmp error_code
22805 CFI_ENDPROC
22806-END(simd_coprocessor_error)
22807+ENDPROC(simd_coprocessor_error)
22808
22809 ENTRY(device_not_available)
22810 RING0_INT_FRAME
22811@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22812 pushl_cfi $do_device_not_available
22813 jmp error_code
22814 CFI_ENDPROC
22815-END(device_not_available)
22816+ENDPROC(device_not_available)
22817
22818 #ifdef CONFIG_PARAVIRT
22819 ENTRY(native_iret)
22820 iret
22821 _ASM_EXTABLE(native_iret, iret_exc)
22822-END(native_iret)
22823+ENDPROC(native_iret)
22824
22825 ENTRY(native_irq_enable_sysexit)
22826 sti
22827 sysexit
22828-END(native_irq_enable_sysexit)
22829+ENDPROC(native_irq_enable_sysexit)
22830 #endif
22831
22832 ENTRY(overflow)
22833@@ -860,7 +1100,7 @@ ENTRY(overflow)
22834 pushl_cfi $do_overflow
22835 jmp error_code
22836 CFI_ENDPROC
22837-END(overflow)
22838+ENDPROC(overflow)
22839
22840 ENTRY(bounds)
22841 RING0_INT_FRAME
22842@@ -869,7 +1109,7 @@ ENTRY(bounds)
22843 pushl_cfi $do_bounds
22844 jmp error_code
22845 CFI_ENDPROC
22846-END(bounds)
22847+ENDPROC(bounds)
22848
22849 ENTRY(invalid_op)
22850 RING0_INT_FRAME
22851@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22852 pushl_cfi $do_invalid_op
22853 jmp error_code
22854 CFI_ENDPROC
22855-END(invalid_op)
22856+ENDPROC(invalid_op)
22857
22858 ENTRY(coprocessor_segment_overrun)
22859 RING0_INT_FRAME
22860@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22861 pushl_cfi $do_coprocessor_segment_overrun
22862 jmp error_code
22863 CFI_ENDPROC
22864-END(coprocessor_segment_overrun)
22865+ENDPROC(coprocessor_segment_overrun)
22866
22867 ENTRY(invalid_TSS)
22868 RING0_EC_FRAME
22869@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22870 pushl_cfi $do_invalid_TSS
22871 jmp error_code
22872 CFI_ENDPROC
22873-END(invalid_TSS)
22874+ENDPROC(invalid_TSS)
22875
22876 ENTRY(segment_not_present)
22877 RING0_EC_FRAME
22878@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22879 pushl_cfi $do_segment_not_present
22880 jmp error_code
22881 CFI_ENDPROC
22882-END(segment_not_present)
22883+ENDPROC(segment_not_present)
22884
22885 ENTRY(stack_segment)
22886 RING0_EC_FRAME
22887@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22888 pushl_cfi $do_stack_segment
22889 jmp error_code
22890 CFI_ENDPROC
22891-END(stack_segment)
22892+ENDPROC(stack_segment)
22893
22894 ENTRY(alignment_check)
22895 RING0_EC_FRAME
22896@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22897 pushl_cfi $do_alignment_check
22898 jmp error_code
22899 CFI_ENDPROC
22900-END(alignment_check)
22901+ENDPROC(alignment_check)
22902
22903 ENTRY(divide_error)
22904 RING0_INT_FRAME
22905@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22906 pushl_cfi $do_divide_error
22907 jmp error_code
22908 CFI_ENDPROC
22909-END(divide_error)
22910+ENDPROC(divide_error)
22911
22912 #ifdef CONFIG_X86_MCE
22913 ENTRY(machine_check)
22914@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22915 pushl_cfi machine_check_vector
22916 jmp error_code
22917 CFI_ENDPROC
22918-END(machine_check)
22919+ENDPROC(machine_check)
22920 #endif
22921
22922 ENTRY(spurious_interrupt_bug)
22923@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22924 pushl_cfi $do_spurious_interrupt_bug
22925 jmp error_code
22926 CFI_ENDPROC
22927-END(spurious_interrupt_bug)
22928+ENDPROC(spurious_interrupt_bug)
22929
22930 #ifdef CONFIG_XEN
22931 /* Xen doesn't set %esp to be precisely what the normal sysenter
22932@@ -1057,7 +1297,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22933
22934 ENTRY(mcount)
22935 ret
22936-END(mcount)
22937+ENDPROC(mcount)
22938
22939 ENTRY(ftrace_caller)
22940 pushl %eax
22941@@ -1087,7 +1327,7 @@ ftrace_graph_call:
22942 .globl ftrace_stub
22943 ftrace_stub:
22944 ret
22945-END(ftrace_caller)
22946+ENDPROC(ftrace_caller)
22947
22948 ENTRY(ftrace_regs_caller)
22949 pushf /* push flags before compare (in cs location) */
22950@@ -1185,7 +1425,7 @@ trace:
22951 popl %ecx
22952 popl %eax
22953 jmp ftrace_stub
22954-END(mcount)
22955+ENDPROC(mcount)
22956 #endif /* CONFIG_DYNAMIC_FTRACE */
22957 #endif /* CONFIG_FUNCTION_TRACER */
22958
22959@@ -1203,7 +1443,7 @@ ENTRY(ftrace_graph_caller)
22960 popl %ecx
22961 popl %eax
22962 ret
22963-END(ftrace_graph_caller)
22964+ENDPROC(ftrace_graph_caller)
22965
22966 .globl return_to_handler
22967 return_to_handler:
22968@@ -1264,15 +1504,18 @@ error_code:
22969 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22970 REG_TO_PTGS %ecx
22971 SET_KERNEL_GS %ecx
22972- movl $(__USER_DS), %ecx
22973+ movl $(__KERNEL_DS), %ecx
22974 movl %ecx, %ds
22975 movl %ecx, %es
22976+
22977+ pax_enter_kernel
22978+
22979 TRACE_IRQS_OFF
22980 movl %esp,%eax # pt_regs pointer
22981 call *%edi
22982 jmp ret_from_exception
22983 CFI_ENDPROC
22984-END(page_fault)
22985+ENDPROC(page_fault)
22986
22987 /*
22988 * Debug traps and NMI can happen at the one SYSENTER instruction
22989@@ -1315,7 +1558,7 @@ debug_stack_correct:
22990 call do_debug
22991 jmp ret_from_exception
22992 CFI_ENDPROC
22993-END(debug)
22994+ENDPROC(debug)
22995
22996 /*
22997 * NMI is doubly nasty. It can happen _while_ we're handling
22998@@ -1355,6 +1598,9 @@ nmi_stack_correct:
22999 xorl %edx,%edx # zero error code
23000 movl %esp,%eax # pt_regs pointer
23001 call do_nmi
23002+
23003+ pax_exit_kernel
23004+
23005 jmp restore_all_notrace
23006 CFI_ENDPROC
23007
23008@@ -1392,13 +1638,16 @@ nmi_espfix_stack:
23009 FIXUP_ESPFIX_STACK # %eax == %esp
23010 xorl %edx,%edx # zero error code
23011 call do_nmi
23012+
23013+ pax_exit_kernel
23014+
23015 RESTORE_REGS
23016 lss 12+4(%esp), %esp # back to espfix stack
23017 CFI_ADJUST_CFA_OFFSET -24
23018 jmp irq_return
23019 #endif
23020 CFI_ENDPROC
23021-END(nmi)
23022+ENDPROC(nmi)
23023
23024 ENTRY(int3)
23025 RING0_INT_FRAME
23026@@ -1411,14 +1660,14 @@ ENTRY(int3)
23027 call do_int3
23028 jmp ret_from_exception
23029 CFI_ENDPROC
23030-END(int3)
23031+ENDPROC(int3)
23032
23033 ENTRY(general_protection)
23034 RING0_EC_FRAME
23035 pushl_cfi $do_general_protection
23036 jmp error_code
23037 CFI_ENDPROC
23038-END(general_protection)
23039+ENDPROC(general_protection)
23040
23041 #ifdef CONFIG_KVM_GUEST
23042 ENTRY(async_page_fault)
23043@@ -1427,6 +1676,6 @@ ENTRY(async_page_fault)
23044 pushl_cfi $do_async_page_fault
23045 jmp error_code
23046 CFI_ENDPROC
23047-END(async_page_fault)
23048+ENDPROC(async_page_fault)
23049 #endif
23050
23051diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23052index f0095a7..ec77893 100644
23053--- a/arch/x86/kernel/entry_64.S
23054+++ b/arch/x86/kernel/entry_64.S
23055@@ -59,6 +59,8 @@
23056 #include <asm/smap.h>
23057 #include <asm/pgtable_types.h>
23058 #include <linux/err.h>
23059+#include <asm/pgtable.h>
23060+#include <asm/alternative-asm.h>
23061
23062 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23063 #include <linux/elf-em.h>
23064@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23065 ENDPROC(native_usergs_sysret64)
23066 #endif /* CONFIG_PARAVIRT */
23067
23068+ .macro ljmpq sel, off
23069+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23070+ .byte 0x48; ljmp *1234f(%rip)
23071+ .pushsection .rodata
23072+ .align 16
23073+ 1234: .quad \off; .word \sel
23074+ .popsection
23075+#else
23076+ pushq $\sel
23077+ pushq $\off
23078+ lretq
23079+#endif
23080+ .endm
23081+
23082+ .macro pax_enter_kernel
23083+ pax_set_fptr_mask
23084+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23085+ call pax_enter_kernel
23086+#endif
23087+ .endm
23088+
23089+ .macro pax_exit_kernel
23090+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23091+ call pax_exit_kernel
23092+#endif
23093+
23094+ .endm
23095+
23096+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23097+ENTRY(pax_enter_kernel)
23098+ pushq %rdi
23099+
23100+#ifdef CONFIG_PARAVIRT
23101+ PV_SAVE_REGS(CLBR_RDI)
23102+#endif
23103+
23104+#ifdef CONFIG_PAX_KERNEXEC
23105+ GET_CR0_INTO_RDI
23106+ bts $16,%rdi
23107+ jnc 3f
23108+ mov %cs,%edi
23109+ cmp $__KERNEL_CS,%edi
23110+ jnz 2f
23111+1:
23112+#endif
23113+
23114+#ifdef CONFIG_PAX_MEMORY_UDEREF
23115+ 661: jmp 111f
23116+ .pushsection .altinstr_replacement, "a"
23117+ 662: ASM_NOP2
23118+ .popsection
23119+ .pushsection .altinstructions, "a"
23120+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23121+ .popsection
23122+ GET_CR3_INTO_RDI
23123+ cmp $0,%dil
23124+ jnz 112f
23125+ mov $__KERNEL_DS,%edi
23126+ mov %edi,%ss
23127+ jmp 111f
23128+112: cmp $1,%dil
23129+ jz 113f
23130+ ud2
23131+113: sub $4097,%rdi
23132+ bts $63,%rdi
23133+ SET_RDI_INTO_CR3
23134+ mov $__UDEREF_KERNEL_DS,%edi
23135+ mov %edi,%ss
23136+111:
23137+#endif
23138+
23139+#ifdef CONFIG_PARAVIRT
23140+ PV_RESTORE_REGS(CLBR_RDI)
23141+#endif
23142+
23143+ popq %rdi
23144+ pax_force_retaddr
23145+ retq
23146+
23147+#ifdef CONFIG_PAX_KERNEXEC
23148+2: ljmpq __KERNEL_CS,1b
23149+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23150+4: SET_RDI_INTO_CR0
23151+ jmp 1b
23152+#endif
23153+ENDPROC(pax_enter_kernel)
23154+
23155+ENTRY(pax_exit_kernel)
23156+ pushq %rdi
23157+
23158+#ifdef CONFIG_PARAVIRT
23159+ PV_SAVE_REGS(CLBR_RDI)
23160+#endif
23161+
23162+#ifdef CONFIG_PAX_KERNEXEC
23163+ mov %cs,%rdi
23164+ cmp $__KERNEXEC_KERNEL_CS,%edi
23165+ jz 2f
23166+ GET_CR0_INTO_RDI
23167+ bts $16,%rdi
23168+ jnc 4f
23169+1:
23170+#endif
23171+
23172+#ifdef CONFIG_PAX_MEMORY_UDEREF
23173+ 661: jmp 111f
23174+ .pushsection .altinstr_replacement, "a"
23175+ 662: ASM_NOP2
23176+ .popsection
23177+ .pushsection .altinstructions, "a"
23178+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23179+ .popsection
23180+ mov %ss,%edi
23181+ cmp $__UDEREF_KERNEL_DS,%edi
23182+ jnz 111f
23183+ GET_CR3_INTO_RDI
23184+ cmp $0,%dil
23185+ jz 112f
23186+ ud2
23187+112: add $4097,%rdi
23188+ bts $63,%rdi
23189+ SET_RDI_INTO_CR3
23190+ mov $__KERNEL_DS,%edi
23191+ mov %edi,%ss
23192+111:
23193+#endif
23194+
23195+#ifdef CONFIG_PARAVIRT
23196+ PV_RESTORE_REGS(CLBR_RDI);
23197+#endif
23198+
23199+ popq %rdi
23200+ pax_force_retaddr
23201+ retq
23202+
23203+#ifdef CONFIG_PAX_KERNEXEC
23204+2: GET_CR0_INTO_RDI
23205+ btr $16,%rdi
23206+ jnc 4f
23207+ ljmpq __KERNEL_CS,3f
23208+3: SET_RDI_INTO_CR0
23209+ jmp 1b
23210+4: ud2
23211+ jmp 4b
23212+#endif
23213+ENDPROC(pax_exit_kernel)
23214+#endif
23215+
23216+ .macro pax_enter_kernel_user
23217+ pax_set_fptr_mask
23218+#ifdef CONFIG_PAX_MEMORY_UDEREF
23219+ call pax_enter_kernel_user
23220+#endif
23221+ .endm
23222+
23223+ .macro pax_exit_kernel_user
23224+#ifdef CONFIG_PAX_MEMORY_UDEREF
23225+ call pax_exit_kernel_user
23226+#endif
23227+#ifdef CONFIG_PAX_RANDKSTACK
23228+ pushq %rax
23229+ pushq %r11
23230+ call pax_randomize_kstack
23231+ popq %r11
23232+ popq %rax
23233+#endif
23234+ .endm
23235+
23236+#ifdef CONFIG_PAX_MEMORY_UDEREF
23237+ENTRY(pax_enter_kernel_user)
23238+ pushq %rdi
23239+ pushq %rbx
23240+
23241+#ifdef CONFIG_PARAVIRT
23242+ PV_SAVE_REGS(CLBR_RDI)
23243+#endif
23244+
23245+ 661: jmp 111f
23246+ .pushsection .altinstr_replacement, "a"
23247+ 662: ASM_NOP2
23248+ .popsection
23249+ .pushsection .altinstructions, "a"
23250+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23251+ .popsection
23252+ GET_CR3_INTO_RDI
23253+ cmp $1,%dil
23254+ jnz 4f
23255+ sub $4097,%rdi
23256+ bts $63,%rdi
23257+ SET_RDI_INTO_CR3
23258+ jmp 3f
23259+111:
23260+
23261+ GET_CR3_INTO_RDI
23262+ mov %rdi,%rbx
23263+ add $__START_KERNEL_map,%rbx
23264+ sub phys_base(%rip),%rbx
23265+
23266+#ifdef CONFIG_PARAVIRT
23267+ cmpl $0, pv_info+PARAVIRT_enabled
23268+ jz 1f
23269+ pushq %rdi
23270+ i = 0
23271+ .rept USER_PGD_PTRS
23272+ mov i*8(%rbx),%rsi
23273+ mov $0,%sil
23274+ lea i*8(%rbx),%rdi
23275+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23276+ i = i + 1
23277+ .endr
23278+ popq %rdi
23279+ jmp 2f
23280+1:
23281+#endif
23282+
23283+ i = 0
23284+ .rept USER_PGD_PTRS
23285+ movb $0,i*8(%rbx)
23286+ i = i + 1
23287+ .endr
23288+
23289+2: SET_RDI_INTO_CR3
23290+
23291+#ifdef CONFIG_PAX_KERNEXEC
23292+ GET_CR0_INTO_RDI
23293+ bts $16,%rdi
23294+ SET_RDI_INTO_CR0
23295+#endif
23296+
23297+3:
23298+
23299+#ifdef CONFIG_PARAVIRT
23300+ PV_RESTORE_REGS(CLBR_RDI)
23301+#endif
23302+
23303+ popq %rbx
23304+ popq %rdi
23305+ pax_force_retaddr
23306+ retq
23307+4: ud2
23308+ENDPROC(pax_enter_kernel_user)
23309+
23310+ENTRY(pax_exit_kernel_user)
23311+ pushq %rdi
23312+ pushq %rbx
23313+
23314+#ifdef CONFIG_PARAVIRT
23315+ PV_SAVE_REGS(CLBR_RDI)
23316+#endif
23317+
23318+ GET_CR3_INTO_RDI
23319+ 661: jmp 1f
23320+ .pushsection .altinstr_replacement, "a"
23321+ 662: ASM_NOP2
23322+ .popsection
23323+ .pushsection .altinstructions, "a"
23324+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23325+ .popsection
23326+ cmp $0,%dil
23327+ jnz 3f
23328+ add $4097,%rdi
23329+ bts $63,%rdi
23330+ SET_RDI_INTO_CR3
23331+ jmp 2f
23332+1:
23333+
23334+ mov %rdi,%rbx
23335+
23336+#ifdef CONFIG_PAX_KERNEXEC
23337+ GET_CR0_INTO_RDI
23338+ btr $16,%rdi
23339+ jnc 3f
23340+ SET_RDI_INTO_CR0
23341+#endif
23342+
23343+ add $__START_KERNEL_map,%rbx
23344+ sub phys_base(%rip),%rbx
23345+
23346+#ifdef CONFIG_PARAVIRT
23347+ cmpl $0, pv_info+PARAVIRT_enabled
23348+ jz 1f
23349+ i = 0
23350+ .rept USER_PGD_PTRS
23351+ mov i*8(%rbx),%rsi
23352+ mov $0x67,%sil
23353+ lea i*8(%rbx),%rdi
23354+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23355+ i = i + 1
23356+ .endr
23357+ jmp 2f
23358+1:
23359+#endif
23360+
23361+ i = 0
23362+ .rept USER_PGD_PTRS
23363+ movb $0x67,i*8(%rbx)
23364+ i = i + 1
23365+ .endr
23366+2:
23367+
23368+#ifdef CONFIG_PARAVIRT
23369+ PV_RESTORE_REGS(CLBR_RDI)
23370+#endif
23371+
23372+ popq %rbx
23373+ popq %rdi
23374+ pax_force_retaddr
23375+ retq
23376+3: ud2
23377+ENDPROC(pax_exit_kernel_user)
23378+#endif
23379+
23380+ .macro pax_enter_kernel_nmi
23381+ pax_set_fptr_mask
23382+
23383+#ifdef CONFIG_PAX_KERNEXEC
23384+ GET_CR0_INTO_RDI
23385+ bts $16,%rdi
23386+ jc 110f
23387+ SET_RDI_INTO_CR0
23388+ or $2,%ebx
23389+110:
23390+#endif
23391+
23392+#ifdef CONFIG_PAX_MEMORY_UDEREF
23393+ 661: jmp 111f
23394+ .pushsection .altinstr_replacement, "a"
23395+ 662: ASM_NOP2
23396+ .popsection
23397+ .pushsection .altinstructions, "a"
23398+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23399+ .popsection
23400+ GET_CR3_INTO_RDI
23401+ cmp $0,%dil
23402+ jz 111f
23403+ sub $4097,%rdi
23404+ or $4,%ebx
23405+ bts $63,%rdi
23406+ SET_RDI_INTO_CR3
23407+ mov $__UDEREF_KERNEL_DS,%edi
23408+ mov %edi,%ss
23409+111:
23410+#endif
23411+ .endm
23412+
23413+ .macro pax_exit_kernel_nmi
23414+#ifdef CONFIG_PAX_KERNEXEC
23415+ btr $1,%ebx
23416+ jnc 110f
23417+ GET_CR0_INTO_RDI
23418+ btr $16,%rdi
23419+ SET_RDI_INTO_CR0
23420+110:
23421+#endif
23422+
23423+#ifdef CONFIG_PAX_MEMORY_UDEREF
23424+ btr $2,%ebx
23425+ jnc 111f
23426+ GET_CR3_INTO_RDI
23427+ add $4097,%rdi
23428+ bts $63,%rdi
23429+ SET_RDI_INTO_CR3
23430+ mov $__KERNEL_DS,%edi
23431+ mov %edi,%ss
23432+111:
23433+#endif
23434+ .endm
23435+
23436+ .macro pax_erase_kstack
23437+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23438+ call pax_erase_kstack
23439+#endif
23440+ .endm
23441+
23442+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23443+ENTRY(pax_erase_kstack)
23444+ pushq %rdi
23445+ pushq %rcx
23446+ pushq %rax
23447+ pushq %r11
23448+
23449+ GET_THREAD_INFO(%r11)
23450+ mov TI_lowest_stack(%r11), %rdi
23451+ mov $-0xBEEF, %rax
23452+ std
23453+
23454+1: mov %edi, %ecx
23455+ and $THREAD_SIZE_asm - 1, %ecx
23456+ shr $3, %ecx
23457+ repne scasq
23458+ jecxz 2f
23459+
23460+ cmp $2*8, %ecx
23461+ jc 2f
23462+
23463+ mov $2*8, %ecx
23464+ repe scasq
23465+ jecxz 2f
23466+ jne 1b
23467+
23468+2: cld
23469+ or $2*8, %rdi
23470+ mov %esp, %ecx
23471+ sub %edi, %ecx
23472+
23473+ cmp $THREAD_SIZE_asm, %rcx
23474+ jb 3f
23475+ ud2
23476+3:
23477+
23478+ shr $3, %ecx
23479+ rep stosq
23480+
23481+ mov TI_task_thread_sp0(%r11), %rdi
23482+ sub $256, %rdi
23483+ mov %rdi, TI_lowest_stack(%r11)
23484+
23485+ popq %r11
23486+ popq %rax
23487+ popq %rcx
23488+ popq %rdi
23489+ pax_force_retaddr
23490+ ret
23491+ENDPROC(pax_erase_kstack)
23492+#endif
23493
23494 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23495 #ifdef CONFIG_TRACE_IRQFLAGS
23496@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23497 .endm
23498
23499 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23500- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23501+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23502 jnc 1f
23503 TRACE_IRQS_ON_DEBUG
23504 1:
23505@@ -243,9 +670,52 @@ ENTRY(save_paranoid)
23506 js 1f /* negative -> in kernel */
23507 SWAPGS
23508 xorl %ebx,%ebx
23509-1: ret
23510+1:
23511+#ifdef CONFIG_PAX_MEMORY_UDEREF
23512+ testb $3, CS+8(%rsp)
23513+ jnz 1f
23514+ pax_enter_kernel
23515+ jmp 2f
23516+1: pax_enter_kernel_user
23517+2:
23518+#else
23519+ pax_enter_kernel
23520+#endif
23521+ pax_force_retaddr
23522+ ret
23523 CFI_ENDPROC
23524-END(save_paranoid)
23525+ENDPROC(save_paranoid)
23526+
23527+ENTRY(save_paranoid_nmi)
23528+ XCPT_FRAME 1 RDI+8
23529+ cld
23530+ movq_cfi rdi, RDI+8
23531+ movq_cfi rsi, RSI+8
23532+ movq_cfi rdx, RDX+8
23533+ movq_cfi rcx, RCX+8
23534+ movq_cfi rax, RAX+8
23535+ movq_cfi r8, R8+8
23536+ movq_cfi r9, R9+8
23537+ movq_cfi r10, R10+8
23538+ movq_cfi r11, R11+8
23539+ movq_cfi rbx, RBX+8
23540+ movq_cfi rbp, RBP+8
23541+ movq_cfi r12, R12+8
23542+ movq_cfi r13, R13+8
23543+ movq_cfi r14, R14+8
23544+ movq_cfi r15, R15+8
23545+ movl $1,%ebx
23546+ movl $MSR_GS_BASE,%ecx
23547+ rdmsr
23548+ testl %edx,%edx
23549+ js 1f /* negative -> in kernel */
23550+ SWAPGS
23551+ xorl %ebx,%ebx
23552+1: pax_enter_kernel_nmi
23553+ pax_force_retaddr
23554+ ret
23555+ CFI_ENDPROC
23556+ENDPROC(save_paranoid_nmi)
23557
23558 /*
23559 * A newly forked process directly context switches into this address.
23560@@ -266,7 +736,7 @@ ENTRY(ret_from_fork)
23561
23562 RESTORE_REST
23563
23564- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23565+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23566 jz 1f
23567
23568 /*
23569@@ -279,15 +749,13 @@ ENTRY(ret_from_fork)
23570 jmp int_ret_from_sys_call
23571
23572 1:
23573- subq $REST_SKIP, %rsp # leave space for volatiles
23574- CFI_ADJUST_CFA_OFFSET REST_SKIP
23575 movq %rbp, %rdi
23576 call *%rbx
23577 movl $0, RAX(%rsp)
23578 RESTORE_REST
23579 jmp int_ret_from_sys_call
23580 CFI_ENDPROC
23581-END(ret_from_fork)
23582+ENDPROC(ret_from_fork)
23583
23584 /*
23585 * System call entry. Up to 6 arguments in registers are supported.
23586@@ -324,7 +792,7 @@ END(ret_from_fork)
23587 ENTRY(system_call)
23588 CFI_STARTPROC simple
23589 CFI_SIGNAL_FRAME
23590- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23591+ CFI_DEF_CFA rsp,0
23592 CFI_REGISTER rip,rcx
23593 /*CFI_REGISTER rflags,r11*/
23594 SWAPGS_UNSAFE_STACK
23595@@ -337,16 +805,23 @@ GLOBAL(system_call_after_swapgs)
23596
23597 movq %rsp,PER_CPU_VAR(old_rsp)
23598 movq PER_CPU_VAR(kernel_stack),%rsp
23599+ SAVE_ARGS 8*6, 0, rax_enosys=1
23600+ pax_enter_kernel_user
23601+
23602+#ifdef CONFIG_PAX_RANDKSTACK
23603+ pax_erase_kstack
23604+#endif
23605+
23606 /*
23607 * No need to follow this irqs off/on section - it's straight
23608 * and short:
23609 */
23610 ENABLE_INTERRUPTS(CLBR_NONE)
23611- SAVE_ARGS 8, 0, rax_enosys=1
23612 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23613 movq %rcx,RIP-ARGOFFSET(%rsp)
23614 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23615- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23616+ GET_THREAD_INFO(%rcx)
23617+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23618 jnz tracesys
23619 system_call_fastpath:
23620 #if __SYSCALL_MASK == ~0
23621@@ -376,10 +851,13 @@ ret_from_sys_call:
23622 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
23623 * very bad.
23624 */
23625- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23626+ GET_THREAD_INFO(%rcx)
23627+ testl $_TIF_ALLWORK_MASK,TI_flags(%rcx)
23628 jnz int_ret_from_sys_call_fixup /* Go the the slow path */
23629
23630 CFI_REMEMBER_STATE
23631+ pax_exit_kernel_user
23632+ pax_erase_kstack
23633 /*
23634 * sysretq will re-enable interrupts:
23635 */
23636@@ -399,12 +877,15 @@ int_ret_from_sys_call_fixup:
23637
23638 /* Do syscall tracing */
23639 tracesys:
23640- leaq -REST_SKIP(%rsp), %rdi
23641+ movq %rsp, %rdi
23642 movq $AUDIT_ARCH_X86_64, %rsi
23643 call syscall_trace_enter_phase1
23644 test %rax, %rax
23645 jnz tracesys_phase2 /* if needed, run the slow path */
23646- LOAD_ARGS 0 /* else restore clobbered regs */
23647+
23648+ pax_erase_kstack
23649+
23650+ LOAD_ARGS /* else restore clobbered regs */
23651 jmp system_call_fastpath /* and return to the fast path */
23652
23653 tracesys_phase2:
23654@@ -415,12 +896,14 @@ tracesys_phase2:
23655 movq %rax,%rdx
23656 call syscall_trace_enter_phase2
23657
23658+ pax_erase_kstack
23659+
23660 /*
23661 * Reload arg registers from stack in case ptrace changed them.
23662 * We don't reload %rax because syscall_trace_entry_phase2() returned
23663 * the value it wants us to use in the table lookup.
23664 */
23665- LOAD_ARGS ARGOFFSET, 1
23666+ LOAD_ARGS 1
23667 RESTORE_REST
23668 #if __SYSCALL_MASK == ~0
23669 cmpq $__NR_syscall_max,%rax
23670@@ -451,7 +934,9 @@ GLOBAL(int_with_check)
23671 andl %edi,%edx
23672 jnz int_careful
23673 andl $~TS_COMPAT,TI_status(%rcx)
23674- jmp retint_swapgs
23675+ pax_exit_kernel_user
23676+ pax_erase_kstack
23677+ jmp retint_swapgs_pax
23678
23679 /* Either reschedule or signal or syscall exit tracking needed. */
23680 /* First do a reschedule test. */
23681@@ -497,7 +982,7 @@ int_restore_rest:
23682 TRACE_IRQS_OFF
23683 jmp int_with_check
23684 CFI_ENDPROC
23685-END(system_call)
23686+ENDPROC(system_call)
23687
23688 .macro FORK_LIKE func
23689 ENTRY(stub_\func)
23690@@ -510,9 +995,10 @@ ENTRY(stub_\func)
23691 DEFAULT_FRAME 0 8 /* offset 8: return address */
23692 call sys_\func
23693 RESTORE_TOP_OF_STACK %r11, 8
23694- ret $REST_SKIP /* pop extended registers */
23695+ pax_force_retaddr
23696+ ret
23697 CFI_ENDPROC
23698-END(stub_\func)
23699+ENDPROC(stub_\func)
23700 .endm
23701
23702 .macro FIXED_FRAME label,func
23703@@ -522,9 +1008,10 @@ ENTRY(\label)
23704 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23705 call \func
23706 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23707+ pax_force_retaddr
23708 ret
23709 CFI_ENDPROC
23710-END(\label)
23711+ENDPROC(\label)
23712 .endm
23713
23714 FORK_LIKE clone
23715@@ -543,7 +1030,7 @@ ENTRY(stub_execve)
23716 RESTORE_REST
23717 jmp int_ret_from_sys_call
23718 CFI_ENDPROC
23719-END(stub_execve)
23720+ENDPROC(stub_execve)
23721
23722 ENTRY(stub_execveat)
23723 CFI_STARTPROC
23724@@ -557,7 +1044,7 @@ ENTRY(stub_execveat)
23725 RESTORE_REST
23726 jmp int_ret_from_sys_call
23727 CFI_ENDPROC
23728-END(stub_execveat)
23729+ENDPROC(stub_execveat)
23730
23731 /*
23732 * sigreturn is special because it needs to restore all registers on return.
23733@@ -574,7 +1061,7 @@ ENTRY(stub_rt_sigreturn)
23734 RESTORE_REST
23735 jmp int_ret_from_sys_call
23736 CFI_ENDPROC
23737-END(stub_rt_sigreturn)
23738+ENDPROC(stub_rt_sigreturn)
23739
23740 #ifdef CONFIG_X86_X32_ABI
23741 ENTRY(stub_x32_rt_sigreturn)
23742@@ -588,7 +1075,7 @@ ENTRY(stub_x32_rt_sigreturn)
23743 RESTORE_REST
23744 jmp int_ret_from_sys_call
23745 CFI_ENDPROC
23746-END(stub_x32_rt_sigreturn)
23747+ENDPROC(stub_x32_rt_sigreturn)
23748
23749 ENTRY(stub_x32_execve)
23750 CFI_STARTPROC
23751@@ -602,7 +1089,7 @@ ENTRY(stub_x32_execve)
23752 RESTORE_REST
23753 jmp int_ret_from_sys_call
23754 CFI_ENDPROC
23755-END(stub_x32_execve)
23756+ENDPROC(stub_x32_execve)
23757
23758 ENTRY(stub_x32_execveat)
23759 CFI_STARTPROC
23760@@ -616,7 +1103,7 @@ ENTRY(stub_x32_execveat)
23761 RESTORE_REST
23762 jmp int_ret_from_sys_call
23763 CFI_ENDPROC
23764-END(stub_x32_execveat)
23765+ENDPROC(stub_x32_execveat)
23766
23767 #endif
23768
23769@@ -653,7 +1140,7 @@ vector=vector+1
23770 2: jmp common_interrupt
23771 .endr
23772 CFI_ENDPROC
23773-END(irq_entries_start)
23774+ENDPROC(irq_entries_start)
23775
23776 .previous
23777 END(interrupt)
23778@@ -670,28 +1157,29 @@ END(interrupt)
23779 /* 0(%rsp): ~(interrupt number) */
23780 .macro interrupt func
23781 /* reserve pt_regs for scratch regs and rbp */
23782- subq $ORIG_RAX-RBP, %rsp
23783- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23784+ subq $ORIG_RAX, %rsp
23785+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23786 cld
23787- /* start from rbp in pt_regs and jump over */
23788- movq_cfi rdi, (RDI-RBP)
23789- movq_cfi rsi, (RSI-RBP)
23790- movq_cfi rdx, (RDX-RBP)
23791- movq_cfi rcx, (RCX-RBP)
23792- movq_cfi rax, (RAX-RBP)
23793- movq_cfi r8, (R8-RBP)
23794- movq_cfi r9, (R9-RBP)
23795- movq_cfi r10, (R10-RBP)
23796- movq_cfi r11, (R11-RBP)
23797+ /* start from r15 in pt_regs and jump over */
23798+ movq_cfi rdi, RDI
23799+ movq_cfi rsi, RSI
23800+ movq_cfi rdx, RDX
23801+ movq_cfi rcx, RCX
23802+ movq_cfi rax, RAX
23803+ movq_cfi r8, R8
23804+ movq_cfi r9, R9
23805+ movq_cfi r10, R10
23806+ movq_cfi r11, R11
23807+ movq_cfi r12, R12
23808
23809 /* Save rbp so that we can unwind from get_irq_regs() */
23810- movq_cfi rbp, 0
23811+ movq_cfi rbp, RBP
23812
23813 /* Save previous stack value */
23814 movq %rsp, %rsi
23815
23816- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23817- testl $3, CS-RBP(%rsi)
23818+ movq %rsp,%rdi /* arg1 for handler */
23819+ testb $3, CS(%rsi)
23820 je 1f
23821 SWAPGS
23822 /*
23823@@ -711,6 +1199,18 @@ END(interrupt)
23824 0x06 /* DW_OP_deref */, \
23825 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23826 0x22 /* DW_OP_plus */
23827+
23828+#ifdef CONFIG_PAX_MEMORY_UDEREF
23829+ testb $3, CS(%rdi)
23830+ jnz 1f
23831+ pax_enter_kernel
23832+ jmp 2f
23833+1: pax_enter_kernel_user
23834+2:
23835+#else
23836+ pax_enter_kernel
23837+#endif
23838+
23839 /* We entered an interrupt context - irqs are off: */
23840 TRACE_IRQS_OFF
23841
23842@@ -735,14 +1235,14 @@ ret_from_intr:
23843
23844 /* Restore saved previous stack */
23845 popq %rsi
23846- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23847- leaq ARGOFFSET-RBP(%rsi), %rsp
23848+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23849+ movq %rsi, %rsp
23850 CFI_DEF_CFA_REGISTER rsp
23851- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23852+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23853
23854 exit_intr:
23855 GET_THREAD_INFO(%rcx)
23856- testl $3,CS-ARGOFFSET(%rsp)
23857+ testb $3,CS-ARGOFFSET(%rsp)
23858 je retint_kernel
23859
23860 /* Interrupt came from user space */
23861@@ -764,14 +1264,16 @@ retint_swapgs: /* return to user-space */
23862 * The iretq could re-enable interrupts:
23863 */
23864 DISABLE_INTERRUPTS(CLBR_ANY)
23865+ pax_exit_kernel_user
23866+retint_swapgs_pax:
23867 TRACE_IRQS_IRETQ
23868
23869 /*
23870 * Try to use SYSRET instead of IRET if we're returning to
23871 * a completely clean 64-bit userspace context.
23872 */
23873- movq (RCX-R11)(%rsp), %rcx
23874- cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
23875+ movq (RCX-ARGOFFSET)(%rsp), %rcx
23876+ cmpq %rcx,(RIP-ARGOFFSET)(%rsp) /* RCX == RIP */
23877 jne opportunistic_sysret_failed
23878
23879 /*
23880@@ -792,7 +1294,7 @@ retint_swapgs: /* return to user-space */
23881 shr $__VIRTUAL_MASK_SHIFT, %rcx
23882 jnz opportunistic_sysret_failed
23883
23884- cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
23885+ cmpq $__USER_CS,(CS-ARGOFFSET)(%rsp) /* CS must match SYSRET */
23886 jne opportunistic_sysret_failed
23887
23888 movq (R11-ARGOFFSET)(%rsp), %r11
23889@@ -838,6 +1340,27 @@ opportunistic_sysret_failed:
23890
23891 retint_restore_args: /* return to kernel space */
23892 DISABLE_INTERRUPTS(CLBR_ANY)
23893+ pax_exit_kernel
23894+
23895+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23896+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23897+ * namely calling EFI runtime services with a phys mapping. We're
23898+ * starting off with NOPs and patch in the real instrumentation
23899+ * (BTS/OR) before starting any userland process; even before starting
23900+ * up the APs.
23901+ */
23902+ .pushsection .altinstr_replacement, "a"
23903+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23904+ 602:
23905+ .popsection
23906+ 603: .fill 602b-601b, 1, 0x90
23907+ .pushsection .altinstructions, "a"
23908+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23909+ .popsection
23910+#else
23911+ pax_force_retaddr (RIP-ARGOFFSET)
23912+#endif
23913+
23914 /*
23915 * The iretq could re-enable interrupts:
23916 */
23917@@ -875,15 +1398,15 @@ native_irq_return_ldt:
23918 SWAPGS
23919 movq PER_CPU_VAR(espfix_waddr),%rdi
23920 movq %rax,(0*8)(%rdi) /* RAX */
23921- movq (2*8)(%rsp),%rax /* RIP */
23922+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23923 movq %rax,(1*8)(%rdi)
23924- movq (3*8)(%rsp),%rax /* CS */
23925+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23926 movq %rax,(2*8)(%rdi)
23927- movq (4*8)(%rsp),%rax /* RFLAGS */
23928+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23929 movq %rax,(3*8)(%rdi)
23930- movq (6*8)(%rsp),%rax /* SS */
23931+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23932 movq %rax,(5*8)(%rdi)
23933- movq (5*8)(%rsp),%rax /* RSP */
23934+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23935 movq %rax,(4*8)(%rdi)
23936 andl $0xffff0000,%eax
23937 popq_cfi %rdi
23938@@ -937,7 +1460,7 @@ ENTRY(retint_kernel)
23939 jmp exit_intr
23940 #endif
23941 CFI_ENDPROC
23942-END(common_interrupt)
23943+ENDPROC(common_interrupt)
23944
23945 /*
23946 * APIC interrupts.
23947@@ -951,7 +1474,7 @@ ENTRY(\sym)
23948 interrupt \do_sym
23949 jmp ret_from_intr
23950 CFI_ENDPROC
23951-END(\sym)
23952+ENDPROC(\sym)
23953 .endm
23954
23955 #ifdef CONFIG_TRACING
23956@@ -1024,7 +1547,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23957 /*
23958 * Exception entry points.
23959 */
23960-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23961+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23962
23963 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23964 ENTRY(\sym)
23965@@ -1080,6 +1603,12 @@ ENTRY(\sym)
23966 .endif
23967
23968 .if \shift_ist != -1
23969+#ifdef CONFIG_SMP
23970+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23971+ lea init_tss(%r13), %r13
23972+#else
23973+ lea init_tss(%rip), %r13
23974+#endif
23975 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
23976 .endif
23977
23978@@ -1126,7 +1655,7 @@ ENTRY(\sym)
23979 .endif
23980
23981 CFI_ENDPROC
23982-END(\sym)
23983+ENDPROC(\sym)
23984 .endm
23985
23986 #ifdef CONFIG_TRACING
23987@@ -1167,9 +1696,10 @@ gs_change:
23988 2: mfence /* workaround */
23989 SWAPGS
23990 popfq_cfi
23991+ pax_force_retaddr
23992 ret
23993 CFI_ENDPROC
23994-END(native_load_gs_index)
23995+ENDPROC(native_load_gs_index)
23996
23997 _ASM_EXTABLE(gs_change,bad_gs)
23998 .section .fixup,"ax"
23999@@ -1197,9 +1727,10 @@ ENTRY(do_softirq_own_stack)
24000 CFI_DEF_CFA_REGISTER rsp
24001 CFI_ADJUST_CFA_OFFSET -8
24002 decl PER_CPU_VAR(irq_count)
24003+ pax_force_retaddr
24004 ret
24005 CFI_ENDPROC
24006-END(do_softirq_own_stack)
24007+ENDPROC(do_softirq_own_stack)
24008
24009 #ifdef CONFIG_XEN
24010 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24011@@ -1240,7 +1771,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24012 #endif
24013 jmp error_exit
24014 CFI_ENDPROC
24015-END(xen_do_hypervisor_callback)
24016+ENDPROC(xen_do_hypervisor_callback)
24017
24018 /*
24019 * Hypervisor uses this for application faults while it executes.
24020@@ -1299,7 +1830,7 @@ ENTRY(xen_failsafe_callback)
24021 SAVE_ALL
24022 jmp error_exit
24023 CFI_ENDPROC
24024-END(xen_failsafe_callback)
24025+ENDPROC(xen_failsafe_callback)
24026
24027 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24028 xen_hvm_callback_vector xen_evtchn_do_upcall
24029@@ -1344,18 +1875,25 @@ ENTRY(paranoid_exit)
24030 DEFAULT_FRAME
24031 DISABLE_INTERRUPTS(CLBR_NONE)
24032 TRACE_IRQS_OFF_DEBUG
24033- testl %ebx,%ebx /* swapgs needed? */
24034+ testl $1,%ebx /* swapgs needed? */
24035 jnz paranoid_restore
24036+#ifdef CONFIG_PAX_MEMORY_UDEREF
24037+ pax_exit_kernel_user
24038+#else
24039+ pax_exit_kernel
24040+#endif
24041 TRACE_IRQS_IRETQ 0
24042 SWAPGS_UNSAFE_STACK
24043 RESTORE_ALL 8
24044 INTERRUPT_RETURN
24045 paranoid_restore:
24046+ pax_exit_kernel
24047 TRACE_IRQS_IRETQ_DEBUG 0
24048 RESTORE_ALL 8
24049+ pax_force_retaddr_bts
24050 INTERRUPT_RETURN
24051 CFI_ENDPROC
24052-END(paranoid_exit)
24053+ENDPROC(paranoid_exit)
24054
24055 /*
24056 * Exception entry point. This expects an error code/orig_rax on the stack.
24057@@ -1382,12 +1920,23 @@ ENTRY(error_entry)
24058 movq %r14, R14+8(%rsp)
24059 movq %r15, R15+8(%rsp)
24060 xorl %ebx,%ebx
24061- testl $3,CS+8(%rsp)
24062+ testb $3,CS+8(%rsp)
24063 je error_kernelspace
24064 error_swapgs:
24065 SWAPGS
24066 error_sti:
24067+#ifdef CONFIG_PAX_MEMORY_UDEREF
24068+ testb $3, CS+8(%rsp)
24069+ jnz 1f
24070+ pax_enter_kernel
24071+ jmp 2f
24072+1: pax_enter_kernel_user
24073+2:
24074+#else
24075+ pax_enter_kernel
24076+#endif
24077 TRACE_IRQS_OFF
24078+ pax_force_retaddr
24079 ret
24080
24081 /*
24082@@ -1422,7 +1971,7 @@ error_bad_iret:
24083 decl %ebx /* Return to usergs */
24084 jmp error_sti
24085 CFI_ENDPROC
24086-END(error_entry)
24087+ENDPROC(error_entry)
24088
24089
24090 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24091@@ -1433,7 +1982,7 @@ ENTRY(error_exit)
24092 DISABLE_INTERRUPTS(CLBR_NONE)
24093 TRACE_IRQS_OFF
24094 GET_THREAD_INFO(%rcx)
24095- testl %eax,%eax
24096+ testl $1,%eax
24097 jne retint_kernel
24098 LOCKDEP_SYS_EXIT_IRQ
24099 movl TI_flags(%rcx),%edx
24100@@ -1442,7 +1991,7 @@ ENTRY(error_exit)
24101 jnz retint_careful
24102 jmp retint_swapgs
24103 CFI_ENDPROC
24104-END(error_exit)
24105+ENDPROC(error_exit)
24106
24107 /*
24108 * Test if a given stack is an NMI stack or not.
24109@@ -1500,9 +2049,11 @@ ENTRY(nmi)
24110 * If %cs was not the kernel segment, then the NMI triggered in user
24111 * space, which means it is definitely not nested.
24112 */
24113+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24114+ je 1f
24115 cmpl $__KERNEL_CS, 16(%rsp)
24116 jne first_nmi
24117-
24118+1:
24119 /*
24120 * Check the special variable on the stack to see if NMIs are
24121 * executing.
24122@@ -1536,8 +2087,7 @@ nested_nmi:
24123
24124 1:
24125 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24126- leaq -1*8(%rsp), %rdx
24127- movq %rdx, %rsp
24128+ subq $8, %rsp
24129 CFI_ADJUST_CFA_OFFSET 1*8
24130 leaq -10*8(%rsp), %rdx
24131 pushq_cfi $__KERNEL_DS
24132@@ -1555,6 +2105,7 @@ nested_nmi_out:
24133 CFI_RESTORE rdx
24134
24135 /* No need to check faults here */
24136+# pax_force_retaddr_bts
24137 INTERRUPT_RETURN
24138
24139 CFI_RESTORE_STATE
24140@@ -1651,13 +2202,13 @@ end_repeat_nmi:
24141 subq $ORIG_RAX-R15, %rsp
24142 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24143 /*
24144- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24145+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24146 * as we should not be calling schedule in NMI context.
24147 * Even with normal interrupts enabled. An NMI should not be
24148 * setting NEED_RESCHED or anything that normal interrupts and
24149 * exceptions might do.
24150 */
24151- call save_paranoid
24152+ call save_paranoid_nmi
24153 DEFAULT_FRAME 0
24154
24155 /*
24156@@ -1667,9 +2218,9 @@ end_repeat_nmi:
24157 * NMI itself takes a page fault, the page fault that was preempted
24158 * will read the information from the NMI page fault and not the
24159 * origin fault. Save it off and restore it if it changes.
24160- * Use the r12 callee-saved register.
24161+ * Use the r13 callee-saved register.
24162 */
24163- movq %cr2, %r12
24164+ movq %cr2, %r13
24165
24166 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24167 movq %rsp,%rdi
24168@@ -1678,29 +2229,34 @@ end_repeat_nmi:
24169
24170 /* Did the NMI take a page fault? Restore cr2 if it did */
24171 movq %cr2, %rcx
24172- cmpq %rcx, %r12
24173+ cmpq %rcx, %r13
24174 je 1f
24175- movq %r12, %cr2
24176+ movq %r13, %cr2
24177 1:
24178
24179- testl %ebx,%ebx /* swapgs needed? */
24180+ testl $1,%ebx /* swapgs needed? */
24181 jnz nmi_restore
24182 nmi_swapgs:
24183 SWAPGS_UNSAFE_STACK
24184 nmi_restore:
24185+ pax_exit_kernel_nmi
24186 /* Pop the extra iret frame at once */
24187 RESTORE_ALL 6*8
24188+ testb $3, 8(%rsp)
24189+ jnz 1f
24190+ pax_force_retaddr_bts
24191+1:
24192
24193 /* Clear the NMI executing stack variable */
24194 movq $0, 5*8(%rsp)
24195 jmp irq_return
24196 CFI_ENDPROC
24197-END(nmi)
24198+ENDPROC(nmi)
24199
24200 ENTRY(ignore_sysret)
24201 CFI_STARTPROC
24202 mov $-ENOSYS,%eax
24203 sysret
24204 CFI_ENDPROC
24205-END(ignore_sysret)
24206+ENDPROC(ignore_sysret)
24207
24208diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24209index f5d0730..5bce89c 100644
24210--- a/arch/x86/kernel/espfix_64.c
24211+++ b/arch/x86/kernel/espfix_64.c
24212@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24213 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24214 static void *espfix_pages[ESPFIX_MAX_PAGES];
24215
24216-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24217- __aligned(PAGE_SIZE);
24218+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24219
24220 static unsigned int page_random, slot_random;
24221
24222@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24223 void __init init_espfix_bsp(void)
24224 {
24225 pgd_t *pgd_p;
24226+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24227
24228 /* Install the espfix pud into the kernel page directory */
24229- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24230+ pgd_p = &init_level4_pgt[index];
24231 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24232
24233+#ifdef CONFIG_PAX_PER_CPU_PGD
24234+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24235+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24236+#endif
24237+
24238 /* Randomize the locations */
24239 init_espfix_random();
24240
24241@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24242 set_pte(&pte_p[n*PTE_STRIDE], pte);
24243
24244 /* Job is done for this CPU and any CPU which shares this page */
24245- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24246+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24247
24248 unlock_done:
24249 mutex_unlock(&espfix_init_mutex);
24250diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24251index 8b7b0a5..2395f29 100644
24252--- a/arch/x86/kernel/ftrace.c
24253+++ b/arch/x86/kernel/ftrace.c
24254@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24255 * kernel identity mapping to modify code.
24256 */
24257 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24258- ip = (unsigned long)__va(__pa_symbol(ip));
24259+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24260
24261 return ip;
24262 }
24263@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24264 {
24265 unsigned char replaced[MCOUNT_INSN_SIZE];
24266
24267+ ip = ktla_ktva(ip);
24268+
24269 /*
24270 * Note: Due to modules and __init, code can
24271 * disappear and change, we need to protect against faulting
24272@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24273 unsigned char old[MCOUNT_INSN_SIZE];
24274 int ret;
24275
24276- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24277+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24278
24279 ftrace_update_func = ip;
24280 /* Make sure the breakpoints see the ftrace_update_func update */
24281@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24282 unsigned char replaced[MCOUNT_INSN_SIZE];
24283 unsigned char brk = BREAKPOINT_INSTRUCTION;
24284
24285- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24286+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24287 return -EFAULT;
24288
24289 /* Make sure it is what we expect it to be */
24290diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24291index c4f8d46..2d63ae2 100644
24292--- a/arch/x86/kernel/head64.c
24293+++ b/arch/x86/kernel/head64.c
24294@@ -68,12 +68,12 @@ again:
24295 pgd = *pgd_p;
24296
24297 /*
24298- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24299- * critical -- __PAGE_OFFSET would point us back into the dynamic
24300+ * The use of __early_va rather than __va here is critical:
24301+ * __va would point us back into the dynamic
24302 * range and we might end up looping forever...
24303 */
24304 if (pgd)
24305- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24306+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24307 else {
24308 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24309 reset_early_page_tables();
24310@@ -83,13 +83,13 @@ again:
24311 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24312 for (i = 0; i < PTRS_PER_PUD; i++)
24313 pud_p[i] = 0;
24314- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24315+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24316 }
24317 pud_p += pud_index(address);
24318 pud = *pud_p;
24319
24320 if (pud)
24321- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24322+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24323 else {
24324 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24325 reset_early_page_tables();
24326@@ -99,7 +99,7 @@ again:
24327 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24328 for (i = 0; i < PTRS_PER_PMD; i++)
24329 pmd_p[i] = 0;
24330- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24331+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24332 }
24333 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24334 pmd_p[pmd_index(address)] = pmd;
24335@@ -180,7 +180,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24336 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24337 early_printk("Kernel alive\n");
24338
24339- clear_page(init_level4_pgt);
24340 /* set init_level4_pgt kernel high mapping*/
24341 init_level4_pgt[511] = early_level4_pgt[511];
24342
24343diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24344index f36bd42..0ab4474 100644
24345--- a/arch/x86/kernel/head_32.S
24346+++ b/arch/x86/kernel/head_32.S
24347@@ -26,6 +26,12 @@
24348 /* Physical address */
24349 #define pa(X) ((X) - __PAGE_OFFSET)
24350
24351+#ifdef CONFIG_PAX_KERNEXEC
24352+#define ta(X) (X)
24353+#else
24354+#define ta(X) ((X) - __PAGE_OFFSET)
24355+#endif
24356+
24357 /*
24358 * References to members of the new_cpu_data structure.
24359 */
24360@@ -55,11 +61,7 @@
24361 * and small than max_low_pfn, otherwise will waste some page table entries
24362 */
24363
24364-#if PTRS_PER_PMD > 1
24365-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24366-#else
24367-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24368-#endif
24369+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24370
24371 /* Number of possible pages in the lowmem region */
24372 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24373@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24374 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24375
24376 /*
24377+ * Real beginning of normal "text" segment
24378+ */
24379+ENTRY(stext)
24380+ENTRY(_stext)
24381+
24382+/*
24383 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24384 * %esi points to the real-mode code as a 32-bit pointer.
24385 * CS and DS must be 4 GB flat segments, but we don't depend on
24386@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24387 * can.
24388 */
24389 __HEAD
24390+
24391+#ifdef CONFIG_PAX_KERNEXEC
24392+ jmp startup_32
24393+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24394+.fill PAGE_SIZE-5,1,0xcc
24395+#endif
24396+
24397 ENTRY(startup_32)
24398 movl pa(stack_start),%ecx
24399
24400@@ -106,6 +121,59 @@ ENTRY(startup_32)
24401 2:
24402 leal -__PAGE_OFFSET(%ecx),%esp
24403
24404+#ifdef CONFIG_SMP
24405+ movl $pa(cpu_gdt_table),%edi
24406+ movl $__per_cpu_load,%eax
24407+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24408+ rorl $16,%eax
24409+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24410+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24411+ movl $__per_cpu_end - 1,%eax
24412+ subl $__per_cpu_start,%eax
24413+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24414+#endif
24415+
24416+#ifdef CONFIG_PAX_MEMORY_UDEREF
24417+ movl $NR_CPUS,%ecx
24418+ movl $pa(cpu_gdt_table),%edi
24419+1:
24420+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24421+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24422+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24423+ addl $PAGE_SIZE_asm,%edi
24424+ loop 1b
24425+#endif
24426+
24427+#ifdef CONFIG_PAX_KERNEXEC
24428+ movl $pa(boot_gdt),%edi
24429+ movl $__LOAD_PHYSICAL_ADDR,%eax
24430+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24431+ rorl $16,%eax
24432+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24433+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24434+ rorl $16,%eax
24435+
24436+ ljmp $(__BOOT_CS),$1f
24437+1:
24438+
24439+ movl $NR_CPUS,%ecx
24440+ movl $pa(cpu_gdt_table),%edi
24441+ addl $__PAGE_OFFSET,%eax
24442+1:
24443+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24444+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24445+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24446+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24447+ rorl $16,%eax
24448+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24449+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24450+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24451+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24452+ rorl $16,%eax
24453+ addl $PAGE_SIZE_asm,%edi
24454+ loop 1b
24455+#endif
24456+
24457 /*
24458 * Clear BSS first so that there are no surprises...
24459 */
24460@@ -201,8 +269,11 @@ ENTRY(startup_32)
24461 movl %eax, pa(max_pfn_mapped)
24462
24463 /* Do early initialization of the fixmap area */
24464- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24465- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24466+#ifdef CONFIG_COMPAT_VDSO
24467+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24468+#else
24469+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24470+#endif
24471 #else /* Not PAE */
24472
24473 page_pde_offset = (__PAGE_OFFSET >> 20);
24474@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24475 movl %eax, pa(max_pfn_mapped)
24476
24477 /* Do early initialization of the fixmap area */
24478- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24479- movl %eax,pa(initial_page_table+0xffc)
24480+#ifdef CONFIG_COMPAT_VDSO
24481+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24482+#else
24483+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24484+#endif
24485 #endif
24486
24487 #ifdef CONFIG_PARAVIRT
24488@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24489 cmpl $num_subarch_entries, %eax
24490 jae bad_subarch
24491
24492- movl pa(subarch_entries)(,%eax,4), %eax
24493- subl $__PAGE_OFFSET, %eax
24494- jmp *%eax
24495+ jmp *pa(subarch_entries)(,%eax,4)
24496
24497 bad_subarch:
24498 WEAK(lguest_entry)
24499@@ -261,10 +333,10 @@ WEAK(xen_entry)
24500 __INITDATA
24501
24502 subarch_entries:
24503- .long default_entry /* normal x86/PC */
24504- .long lguest_entry /* lguest hypervisor */
24505- .long xen_entry /* Xen hypervisor */
24506- .long default_entry /* Moorestown MID */
24507+ .long ta(default_entry) /* normal x86/PC */
24508+ .long ta(lguest_entry) /* lguest hypervisor */
24509+ .long ta(xen_entry) /* Xen hypervisor */
24510+ .long ta(default_entry) /* Moorestown MID */
24511 num_subarch_entries = (. - subarch_entries) / 4
24512 .previous
24513 #else
24514@@ -354,6 +426,7 @@ default_entry:
24515 movl pa(mmu_cr4_features),%eax
24516 movl %eax,%cr4
24517
24518+#ifdef CONFIG_X86_PAE
24519 testb $X86_CR4_PAE, %al # check if PAE is enabled
24520 jz enable_paging
24521
24522@@ -382,6 +455,9 @@ default_entry:
24523 /* Make changes effective */
24524 wrmsr
24525
24526+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24527+#endif
24528+
24529 enable_paging:
24530
24531 /*
24532@@ -449,14 +525,20 @@ is486:
24533 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24534 movl %eax,%ss # after changing gdt.
24535
24536- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24537+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24538 movl %eax,%ds
24539 movl %eax,%es
24540
24541 movl $(__KERNEL_PERCPU), %eax
24542 movl %eax,%fs # set this cpu's percpu
24543
24544+#ifdef CONFIG_CC_STACKPROTECTOR
24545 movl $(__KERNEL_STACK_CANARY),%eax
24546+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24547+ movl $(__USER_DS),%eax
24548+#else
24549+ xorl %eax,%eax
24550+#endif
24551 movl %eax,%gs
24552
24553 xorl %eax,%eax # Clear LDT
24554@@ -512,8 +594,11 @@ setup_once:
24555 * relocation. Manually set base address in stack canary
24556 * segment descriptor.
24557 */
24558- movl $gdt_page,%eax
24559+ movl $cpu_gdt_table,%eax
24560 movl $stack_canary,%ecx
24561+#ifdef CONFIG_SMP
24562+ addl $__per_cpu_load,%ecx
24563+#endif
24564 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24565 shrl $16, %ecx
24566 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24567@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24568 cmpl $2,(%esp) # X86_TRAP_NMI
24569 je is_nmi # Ignore NMI
24570
24571- cmpl $2,%ss:early_recursion_flag
24572+ cmpl $1,%ss:early_recursion_flag
24573 je hlt_loop
24574 incl %ss:early_recursion_flag
24575
24576@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24577 pushl (20+6*4)(%esp) /* trapno */
24578 pushl $fault_msg
24579 call printk
24580-#endif
24581 call dump_stack
24582+#endif
24583 hlt_loop:
24584 hlt
24585 jmp hlt_loop
24586@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24587 /* This is the default interrupt "handler" :-) */
24588 ALIGN
24589 ignore_int:
24590- cld
24591 #ifdef CONFIG_PRINTK
24592+ cmpl $2,%ss:early_recursion_flag
24593+ je hlt_loop
24594+ incl %ss:early_recursion_flag
24595+ cld
24596 pushl %eax
24597 pushl %ecx
24598 pushl %edx
24599@@ -617,9 +705,6 @@ ignore_int:
24600 movl $(__KERNEL_DS),%eax
24601 movl %eax,%ds
24602 movl %eax,%es
24603- cmpl $2,early_recursion_flag
24604- je hlt_loop
24605- incl early_recursion_flag
24606 pushl 16(%esp)
24607 pushl 24(%esp)
24608 pushl 32(%esp)
24609@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24610 /*
24611 * BSS section
24612 */
24613-__PAGE_ALIGNED_BSS
24614- .align PAGE_SIZE
24615 #ifdef CONFIG_X86_PAE
24616+.section .initial_pg_pmd,"a",@progbits
24617 initial_pg_pmd:
24618 .fill 1024*KPMDS,4,0
24619 #else
24620+.section .initial_page_table,"a",@progbits
24621 ENTRY(initial_page_table)
24622 .fill 1024,4,0
24623 #endif
24624+.section .initial_pg_fixmap,"a",@progbits
24625 initial_pg_fixmap:
24626 .fill 1024,4,0
24627+.section .empty_zero_page,"a",@progbits
24628 ENTRY(empty_zero_page)
24629 .fill 4096,1,0
24630+.section .swapper_pg_dir,"a",@progbits
24631 ENTRY(swapper_pg_dir)
24632+#ifdef CONFIG_X86_PAE
24633+ .fill 4,8,0
24634+#else
24635 .fill 1024,4,0
24636+#endif
24637
24638 /*
24639 * This starts the data section.
24640 */
24641 #ifdef CONFIG_X86_PAE
24642-__PAGE_ALIGNED_DATA
24643- /* Page-aligned for the benefit of paravirt? */
24644- .align PAGE_SIZE
24645+.section .initial_page_table,"a",@progbits
24646 ENTRY(initial_page_table)
24647 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24648 # if KPMDS == 3
24649@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24650 # error "Kernel PMDs should be 1, 2 or 3"
24651 # endif
24652 .align PAGE_SIZE /* needs to be page-sized too */
24653+
24654+#ifdef CONFIG_PAX_PER_CPU_PGD
24655+ENTRY(cpu_pgd)
24656+ .rept 2*NR_CPUS
24657+ .fill 4,8,0
24658+ .endr
24659+#endif
24660+
24661 #endif
24662
24663 .data
24664 .balign 4
24665 ENTRY(stack_start)
24666- .long init_thread_union+THREAD_SIZE
24667+ .long init_thread_union+THREAD_SIZE-8
24668
24669 __INITRODATA
24670 int_msg:
24671@@ -727,7 +825,7 @@ fault_msg:
24672 * segment size, and 32-bit linear address value:
24673 */
24674
24675- .data
24676+.section .rodata,"a",@progbits
24677 .globl boot_gdt_descr
24678 .globl idt_descr
24679
24680@@ -736,7 +834,7 @@ fault_msg:
24681 .word 0 # 32 bit align gdt_desc.address
24682 boot_gdt_descr:
24683 .word __BOOT_DS+7
24684- .long boot_gdt - __PAGE_OFFSET
24685+ .long pa(boot_gdt)
24686
24687 .word 0 # 32-bit align idt_desc.address
24688 idt_descr:
24689@@ -747,7 +845,7 @@ idt_descr:
24690 .word 0 # 32 bit align gdt_desc.address
24691 ENTRY(early_gdt_descr)
24692 .word GDT_ENTRIES*8-1
24693- .long gdt_page /* Overwritten for secondary CPUs */
24694+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24695
24696 /*
24697 * The boot_gdt must mirror the equivalent in setup.S and is
24698@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24699 .align L1_CACHE_BYTES
24700 ENTRY(boot_gdt)
24701 .fill GDT_ENTRY_BOOT_CS,8,0
24702- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24703- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24704+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24705+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24706+
24707+ .align PAGE_SIZE_asm
24708+ENTRY(cpu_gdt_table)
24709+ .rept NR_CPUS
24710+ .quad 0x0000000000000000 /* NULL descriptor */
24711+ .quad 0x0000000000000000 /* 0x0b reserved */
24712+ .quad 0x0000000000000000 /* 0x13 reserved */
24713+ .quad 0x0000000000000000 /* 0x1b reserved */
24714+
24715+#ifdef CONFIG_PAX_KERNEXEC
24716+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24717+#else
24718+ .quad 0x0000000000000000 /* 0x20 unused */
24719+#endif
24720+
24721+ .quad 0x0000000000000000 /* 0x28 unused */
24722+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24723+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24724+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24725+ .quad 0x0000000000000000 /* 0x4b reserved */
24726+ .quad 0x0000000000000000 /* 0x53 reserved */
24727+ .quad 0x0000000000000000 /* 0x5b reserved */
24728+
24729+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24730+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24731+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24732+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24733+
24734+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24735+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24736+
24737+ /*
24738+ * Segments used for calling PnP BIOS have byte granularity.
24739+ * The code segments and data segments have fixed 64k limits,
24740+ * the transfer segment sizes are set at run time.
24741+ */
24742+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24743+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24744+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24745+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24746+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24747+
24748+ /*
24749+ * The APM segments have byte granularity and their bases
24750+ * are set at run time. All have 64k limits.
24751+ */
24752+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24753+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24754+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24755+
24756+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24757+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24758+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24759+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24760+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24761+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24762+
24763+ /* Be sure this is zeroed to avoid false validations in Xen */
24764+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24765+ .endr
24766diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24767index 6fd514d9..320367e 100644
24768--- a/arch/x86/kernel/head_64.S
24769+++ b/arch/x86/kernel/head_64.S
24770@@ -20,6 +20,8 @@
24771 #include <asm/processor-flags.h>
24772 #include <asm/percpu.h>
24773 #include <asm/nops.h>
24774+#include <asm/cpufeature.h>
24775+#include <asm/alternative-asm.h>
24776
24777 #ifdef CONFIG_PARAVIRT
24778 #include <asm/asm-offsets.h>
24779@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24780 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24781 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24782 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24783+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24784+L3_VMALLOC_START = pud_index(VMALLOC_START)
24785+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24786+L3_VMALLOC_END = pud_index(VMALLOC_END)
24787+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24788+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24789
24790 .text
24791 __HEAD
24792@@ -89,11 +97,26 @@ startup_64:
24793 * Fixup the physical addresses in the page table
24794 */
24795 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24796+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24797+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24798+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24799+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24800+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24801
24802- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24803- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24804+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24805+#ifndef CONFIG_XEN
24806+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24807+#endif
24808
24809+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24810+
24811+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24812+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24813+
24814+ addq %rbp, level2_fixmap_pgt + (504*8)(%rip)
24815+ addq %rbp, level2_fixmap_pgt + (505*8)(%rip)
24816 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24817+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24818
24819 /*
24820 * Set up the identity mapping for the switchover. These
24821@@ -174,11 +197,12 @@ ENTRY(secondary_startup_64)
24822 * after the boot processor executes this code.
24823 */
24824
24825+ orq $-1, %rbp
24826 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24827 1:
24828
24829- /* Enable PAE mode and PGE */
24830- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24831+ /* Enable PAE mode and PSE/PGE */
24832+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24833 movq %rcx, %cr4
24834
24835 /* Setup early boot stage 4 level pagetables. */
24836@@ -199,10 +223,21 @@ ENTRY(secondary_startup_64)
24837 movl $MSR_EFER, %ecx
24838 rdmsr
24839 btsl $_EFER_SCE, %eax /* Enable System Call */
24840- btl $20,%edi /* No Execute supported? */
24841+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24842 jnc 1f
24843 btsl $_EFER_NX, %eax
24844+ cmpq $-1, %rbp
24845+ je 1f
24846 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24847+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24848+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24849+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24850+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24851+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
24852+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip)
24853+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24854+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24855+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24856 1: wrmsr /* Make changes effective */
24857
24858 /* Setup cr0 */
24859@@ -282,6 +317,7 @@ ENTRY(secondary_startup_64)
24860 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24861 * address given in m16:64.
24862 */
24863+ pax_set_fptr_mask
24864 movq initial_code(%rip),%rax
24865 pushq $0 # fake return address to stop unwinder
24866 pushq $__KERNEL_CS # set correct cs
24867@@ -313,7 +349,7 @@ ENDPROC(start_cpu0)
24868 .quad INIT_PER_CPU_VAR(irq_stack_union)
24869
24870 GLOBAL(stack_start)
24871- .quad init_thread_union+THREAD_SIZE-8
24872+ .quad init_thread_union+THREAD_SIZE-16
24873 .word 0
24874 __FINITDATA
24875
24876@@ -391,7 +427,7 @@ ENTRY(early_idt_handler)
24877 call dump_stack
24878 #ifdef CONFIG_KALLSYMS
24879 leaq early_idt_ripmsg(%rip),%rdi
24880- movq 40(%rsp),%rsi # %rip again
24881+ movq 88(%rsp),%rsi # %rip again
24882 call __print_symbol
24883 #endif
24884 #endif /* EARLY_PRINTK */
24885@@ -420,6 +456,7 @@ ENDPROC(early_idt_handler)
24886 early_recursion_flag:
24887 .long 0
24888
24889+ .section .rodata,"a",@progbits
24890 #ifdef CONFIG_EARLY_PRINTK
24891 early_idt_msg:
24892 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24893@@ -447,29 +484,52 @@ NEXT_PAGE(early_level4_pgt)
24894 NEXT_PAGE(early_dynamic_pgts)
24895 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24896
24897- .data
24898+ .section .rodata,"a",@progbits
24899
24900-#ifndef CONFIG_XEN
24901 NEXT_PAGE(init_level4_pgt)
24902- .fill 512,8,0
24903-#else
24904-NEXT_PAGE(init_level4_pgt)
24905- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24906 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24907 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24908+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24909+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24910+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24911+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24912+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24913+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24914 .org init_level4_pgt + L4_START_KERNEL*8, 0
24915 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24916 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24917
24918+#ifdef CONFIG_PAX_PER_CPU_PGD
24919+NEXT_PAGE(cpu_pgd)
24920+ .rept 2*NR_CPUS
24921+ .fill 512,8,0
24922+ .endr
24923+#endif
24924+
24925 NEXT_PAGE(level3_ident_pgt)
24926 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24927+#ifdef CONFIG_XEN
24928 .fill 511, 8, 0
24929+#else
24930+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24931+ .fill 510,8,0
24932+#endif
24933+
24934+NEXT_PAGE(level3_vmalloc_start_pgt)
24935+ .fill 512,8,0
24936+
24937+NEXT_PAGE(level3_vmalloc_end_pgt)
24938+ .fill 512,8,0
24939+
24940+NEXT_PAGE(level3_vmemmap_pgt)
24941+ .fill L3_VMEMMAP_START,8,0
24942+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24943+
24944 NEXT_PAGE(level2_ident_pgt)
24945- /* Since I easily can, map the first 1G.
24946+ /* Since I easily can, map the first 2G.
24947 * Don't set NX because code runs from these pages.
24948 */
24949- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24950-#endif
24951+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24952
24953 NEXT_PAGE(level3_kernel_pgt)
24954 .fill L3_START_KERNEL,8,0
24955@@ -477,6 +537,9 @@ NEXT_PAGE(level3_kernel_pgt)
24956 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24957 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24958
24959+NEXT_PAGE(level2_vmemmap_pgt)
24960+ .fill 512,8,0
24961+
24962 NEXT_PAGE(level2_kernel_pgt)
24963 /*
24964 * 512 MB kernel mapping. We spend a full page on this pagetable
24965@@ -492,23 +555,61 @@ NEXT_PAGE(level2_kernel_pgt)
24966 KERNEL_IMAGE_SIZE/PMD_SIZE)
24967
24968 NEXT_PAGE(level2_fixmap_pgt)
24969- .fill 506,8,0
24970- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24971- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24972- .fill 5,8,0
24973+ .fill 504,8,0
24974+ .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _PAGE_TABLE
24975+ .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _PAGE_TABLE
24976+ .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _PAGE_TABLE
24977+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24978+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24979+ .fill 4,8,0
24980
24981 NEXT_PAGE(level1_fixmap_pgt)
24982+ .fill 3*512,8,0
24983+
24984+NEXT_PAGE(level1_vsyscall_pgt)
24985 .fill 512,8,0
24986
24987 #undef PMDS
24988
24989- .data
24990+ .align PAGE_SIZE
24991+ENTRY(cpu_gdt_table)
24992+ .rept NR_CPUS
24993+ .quad 0x0000000000000000 /* NULL descriptor */
24994+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24995+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24996+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24997+ .quad 0x00cffb000000ffff /* __USER32_CS */
24998+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24999+ .quad 0x00affb000000ffff /* __USER_CS */
25000+
25001+#ifdef CONFIG_PAX_KERNEXEC
25002+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25003+#else
25004+ .quad 0x0 /* unused */
25005+#endif
25006+
25007+ .quad 0,0 /* TSS */
25008+ .quad 0,0 /* LDT */
25009+ .quad 0,0,0 /* three TLS descriptors */
25010+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25011+ /* asm/segment.h:GDT_ENTRIES must match this */
25012+
25013+#ifdef CONFIG_PAX_MEMORY_UDEREF
25014+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25015+#else
25016+ .quad 0x0 /* unused */
25017+#endif
25018+
25019+ /* zero the remaining page */
25020+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25021+ .endr
25022+
25023 .align 16
25024 .globl early_gdt_descr
25025 early_gdt_descr:
25026 .word GDT_ENTRIES*8-1
25027 early_gdt_descr_base:
25028- .quad INIT_PER_CPU_VAR(gdt_page)
25029+ .quad cpu_gdt_table
25030
25031 ENTRY(phys_base)
25032 /* This must match the first entry in level2_kernel_pgt */
25033@@ -532,8 +633,8 @@ NEXT_PAGE(kasan_zero_pud)
25034
25035
25036 #include "../../x86/xen/xen-head.S"
25037-
25038- __PAGE_ALIGNED_BSS
25039+
25040+ .section .rodata,"a",@progbits
25041 NEXT_PAGE(empty_zero_page)
25042 .skip PAGE_SIZE
25043
25044diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25045index 05fd74f..c3548b1 100644
25046--- a/arch/x86/kernel/i386_ksyms_32.c
25047+++ b/arch/x86/kernel/i386_ksyms_32.c
25048@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25049 EXPORT_SYMBOL(cmpxchg8b_emu);
25050 #endif
25051
25052+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25053+
25054 /* Networking helper routines. */
25055 EXPORT_SYMBOL(csum_partial_copy_generic);
25056+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25057+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25058
25059 EXPORT_SYMBOL(__get_user_1);
25060 EXPORT_SYMBOL(__get_user_2);
25061@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25062 EXPORT_SYMBOL(___preempt_schedule_context);
25063 #endif
25064 #endif
25065+
25066+#ifdef CONFIG_PAX_KERNEXEC
25067+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25068+#endif
25069+
25070+#ifdef CONFIG_PAX_PER_CPU_PGD
25071+EXPORT_SYMBOL(cpu_pgd);
25072+#endif
25073diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25074index d5651fc..29c740d 100644
25075--- a/arch/x86/kernel/i387.c
25076+++ b/arch/x86/kernel/i387.c
25077@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25078 static inline bool interrupted_user_mode(void)
25079 {
25080 struct pt_regs *regs = get_irq_regs();
25081- return regs && user_mode_vm(regs);
25082+ return regs && user_mode(regs);
25083 }
25084
25085 /*
25086diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25087index e7cc537..67d7372 100644
25088--- a/arch/x86/kernel/i8259.c
25089+++ b/arch/x86/kernel/i8259.c
25090@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25091 static void make_8259A_irq(unsigned int irq)
25092 {
25093 disable_irq_nosync(irq);
25094- io_apic_irqs &= ~(1<<irq);
25095+ io_apic_irqs &= ~(1UL<<irq);
25096 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25097 enable_irq(irq);
25098 }
25099@@ -208,7 +208,7 @@ spurious_8259A_irq:
25100 "spurious 8259A interrupt: IRQ%d.\n", irq);
25101 spurious_irq_mask |= irqmask;
25102 }
25103- atomic_inc(&irq_err_count);
25104+ atomic_inc_unchecked(&irq_err_count);
25105 /*
25106 * Theoretically we do not have to handle this IRQ,
25107 * but in Linux this does not cause problems and is
25108@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25109 /* (slave's support for AEOI in flat mode is to be investigated) */
25110 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25111
25112+ pax_open_kernel();
25113 if (auto_eoi)
25114 /*
25115 * In AEOI mode we just have to mask the interrupt
25116 * when acking.
25117 */
25118- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25119+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25120 else
25121- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25122+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25123+ pax_close_kernel();
25124
25125 udelay(100); /* wait for 8259A to initialize */
25126
25127diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25128index a979b5b..1d6db75 100644
25129--- a/arch/x86/kernel/io_delay.c
25130+++ b/arch/x86/kernel/io_delay.c
25131@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25132 * Quirk table for systems that misbehave (lock up, etc.) if port
25133 * 0x80 is used:
25134 */
25135-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25136+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25137 {
25138 .callback = dmi_io_delay_0xed_port,
25139 .ident = "Compaq Presario V6000",
25140diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25141index 4ddaf66..49d5c18 100644
25142--- a/arch/x86/kernel/ioport.c
25143+++ b/arch/x86/kernel/ioport.c
25144@@ -6,6 +6,7 @@
25145 #include <linux/sched.h>
25146 #include <linux/kernel.h>
25147 #include <linux/capability.h>
25148+#include <linux/security.h>
25149 #include <linux/errno.h>
25150 #include <linux/types.h>
25151 #include <linux/ioport.h>
25152@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25153 return -EINVAL;
25154 if (turn_on && !capable(CAP_SYS_RAWIO))
25155 return -EPERM;
25156+#ifdef CONFIG_GRKERNSEC_IO
25157+ if (turn_on && grsec_disable_privio) {
25158+ gr_handle_ioperm();
25159+ return -ENODEV;
25160+ }
25161+#endif
25162
25163 /*
25164 * If it's the first ioperm() call in this thread's lifetime, set the
25165@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25166 * because the ->io_bitmap_max value must match the bitmap
25167 * contents:
25168 */
25169- tss = &per_cpu(init_tss, get_cpu());
25170+ tss = init_tss + get_cpu();
25171
25172 if (turn_on)
25173 bitmap_clear(t->io_bitmap_ptr, from, num);
25174@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25175 if (level > old) {
25176 if (!capable(CAP_SYS_RAWIO))
25177 return -EPERM;
25178+#ifdef CONFIG_GRKERNSEC_IO
25179+ if (grsec_disable_privio) {
25180+ gr_handle_iopl();
25181+ return -ENODEV;
25182+ }
25183+#endif
25184 }
25185 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25186 t->iopl = level << 12;
25187diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25188index 67b1cbe..6ad4cbc 100644
25189--- a/arch/x86/kernel/irq.c
25190+++ b/arch/x86/kernel/irq.c
25191@@ -22,7 +22,7 @@
25192 #define CREATE_TRACE_POINTS
25193 #include <asm/trace/irq_vectors.h>
25194
25195-atomic_t irq_err_count;
25196+atomic_unchecked_t irq_err_count;
25197
25198 /* Function pointer for generic interrupt vector handling */
25199 void (*x86_platform_ipi_callback)(void) = NULL;
25200@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25201 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25202 seq_puts(p, " Hypervisor callback interrupts\n");
25203 #endif
25204- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25205+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25206 #if defined(CONFIG_X86_IO_APIC)
25207- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25208+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25209 #endif
25210 return 0;
25211 }
25212@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25213
25214 u64 arch_irq_stat(void)
25215 {
25216- u64 sum = atomic_read(&irq_err_count);
25217+ u64 sum = atomic_read_unchecked(&irq_err_count);
25218 return sum;
25219 }
25220
25221diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25222index 28d28f5..e6cc9ae 100644
25223--- a/arch/x86/kernel/irq_32.c
25224+++ b/arch/x86/kernel/irq_32.c
25225@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25226
25227 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25228
25229+extern void gr_handle_kernel_exploit(void);
25230+
25231 int sysctl_panic_on_stackoverflow __read_mostly;
25232
25233 /* Debugging check for stack overflow: is there less than 1KB free? */
25234@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25235 __asm__ __volatile__("andl %%esp,%0" :
25236 "=r" (sp) : "0" (THREAD_SIZE - 1));
25237
25238- return sp < (sizeof(struct thread_info) + STACK_WARN);
25239+ return sp < STACK_WARN;
25240 }
25241
25242 static void print_stack_overflow(void)
25243 {
25244 printk(KERN_WARNING "low stack detected by irq handler\n");
25245 dump_stack();
25246+ gr_handle_kernel_exploit();
25247 if (sysctl_panic_on_stackoverflow)
25248 panic("low stack detected by irq handler - check messages\n");
25249 }
25250@@ -77,10 +80,9 @@ static inline void *current_stack(void)
25251 static inline int
25252 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25253 {
25254- struct irq_stack *curstk, *irqstk;
25255+ struct irq_stack *irqstk;
25256 u32 *isp, *prev_esp, arg1, arg2;
25257
25258- curstk = (struct irq_stack *) current_stack();
25259 irqstk = __this_cpu_read(hardirq_stack);
25260
25261 /*
25262@@ -89,15 +91,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25263 * handler) we can't do that and just have to keep using the
25264 * current stack (which is the irq stack already after all)
25265 */
25266- if (unlikely(curstk == irqstk))
25267+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25268 return 0;
25269
25270- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25271+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25272
25273 /* Save the next esp at the bottom of the stack */
25274 prev_esp = (u32 *)irqstk;
25275 *prev_esp = current_stack_pointer();
25276
25277+#ifdef CONFIG_PAX_MEMORY_UDEREF
25278+ __set_fs(MAKE_MM_SEG(0));
25279+#endif
25280+
25281 if (unlikely(overflow))
25282 call_on_stack(print_stack_overflow, isp);
25283
25284@@ -108,6 +114,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25285 : "0" (irq), "1" (desc), "2" (isp),
25286 "D" (desc->handle_irq)
25287 : "memory", "cc", "ecx");
25288+
25289+#ifdef CONFIG_PAX_MEMORY_UDEREF
25290+ __set_fs(current_thread_info()->addr_limit);
25291+#endif
25292+
25293 return 1;
25294 }
25295
25296@@ -116,32 +127,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25297 */
25298 void irq_ctx_init(int cpu)
25299 {
25300- struct irq_stack *irqstk;
25301-
25302 if (per_cpu(hardirq_stack, cpu))
25303 return;
25304
25305- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25306- THREADINFO_GFP,
25307- THREAD_SIZE_ORDER));
25308- per_cpu(hardirq_stack, cpu) = irqstk;
25309-
25310- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25311- THREADINFO_GFP,
25312- THREAD_SIZE_ORDER));
25313- per_cpu(softirq_stack, cpu) = irqstk;
25314-
25315- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25316- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25317+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25318+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25319 }
25320
25321 void do_softirq_own_stack(void)
25322 {
25323- struct thread_info *curstk;
25324 struct irq_stack *irqstk;
25325 u32 *isp, *prev_esp;
25326
25327- curstk = current_stack();
25328 irqstk = __this_cpu_read(softirq_stack);
25329
25330 /* build the stack frame on the softirq stack */
25331@@ -151,7 +148,16 @@ void do_softirq_own_stack(void)
25332 prev_esp = (u32 *)irqstk;
25333 *prev_esp = current_stack_pointer();
25334
25335+#ifdef CONFIG_PAX_MEMORY_UDEREF
25336+ __set_fs(MAKE_MM_SEG(0));
25337+#endif
25338+
25339 call_on_stack(__do_softirq, isp);
25340+
25341+#ifdef CONFIG_PAX_MEMORY_UDEREF
25342+ __set_fs(current_thread_info()->addr_limit);
25343+#endif
25344+
25345 }
25346
25347 bool handle_irq(unsigned irq, struct pt_regs *regs)
25348@@ -165,7 +171,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25349 if (unlikely(!desc))
25350 return false;
25351
25352- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25353+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25354 if (unlikely(overflow))
25355 print_stack_overflow();
25356 desc->handle_irq(irq, desc);
25357diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25358index e4b503d..824fce8 100644
25359--- a/arch/x86/kernel/irq_64.c
25360+++ b/arch/x86/kernel/irq_64.c
25361@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25362 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25363 EXPORT_PER_CPU_SYMBOL(irq_regs);
25364
25365+extern void gr_handle_kernel_exploit(void);
25366+
25367 int sysctl_panic_on_stackoverflow;
25368
25369 /*
25370@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25371 u64 estack_top, estack_bottom;
25372 u64 curbase = (u64)task_stack_page(current);
25373
25374- if (user_mode_vm(regs))
25375+ if (user_mode(regs))
25376 return;
25377
25378 if (regs->sp >= curbase + sizeof(struct thread_info) +
25379@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25380 irq_stack_top, irq_stack_bottom,
25381 estack_top, estack_bottom);
25382
25383+ gr_handle_kernel_exploit();
25384+
25385 if (sysctl_panic_on_stackoverflow)
25386 panic("low stack detected by irq handler - check messages\n");
25387 #endif
25388diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25389index 26d5a55..a01160a 100644
25390--- a/arch/x86/kernel/jump_label.c
25391+++ b/arch/x86/kernel/jump_label.c
25392@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25393 * Jump label is enabled for the first time.
25394 * So we expect a default_nop...
25395 */
25396- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25397+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25398 != 0))
25399 bug_at((void *)entry->code, __LINE__);
25400 } else {
25401@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25402 * ...otherwise expect an ideal_nop. Otherwise
25403 * something went horribly wrong.
25404 */
25405- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25406+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25407 != 0))
25408 bug_at((void *)entry->code, __LINE__);
25409 }
25410@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25411 * are converting the default nop to the ideal nop.
25412 */
25413 if (init) {
25414- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25415+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25416 bug_at((void *)entry->code, __LINE__);
25417 } else {
25418 code.jump = 0xe9;
25419 code.offset = entry->target -
25420 (entry->code + JUMP_LABEL_NOP_SIZE);
25421- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25422+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25423 bug_at((void *)entry->code, __LINE__);
25424 }
25425 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
25426diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25427index 25ecd56..e12482f 100644
25428--- a/arch/x86/kernel/kgdb.c
25429+++ b/arch/x86/kernel/kgdb.c
25430@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25431 #ifdef CONFIG_X86_32
25432 switch (regno) {
25433 case GDB_SS:
25434- if (!user_mode_vm(regs))
25435+ if (!user_mode(regs))
25436 *(unsigned long *)mem = __KERNEL_DS;
25437 break;
25438 case GDB_SP:
25439- if (!user_mode_vm(regs))
25440+ if (!user_mode(regs))
25441 *(unsigned long *)mem = kernel_stack_pointer(regs);
25442 break;
25443 case GDB_GS:
25444@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25445 bp->attr.bp_addr = breakinfo[breakno].addr;
25446 bp->attr.bp_len = breakinfo[breakno].len;
25447 bp->attr.bp_type = breakinfo[breakno].type;
25448- info->address = breakinfo[breakno].addr;
25449+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25450+ info->address = ktla_ktva(breakinfo[breakno].addr);
25451+ else
25452+ info->address = breakinfo[breakno].addr;
25453 info->len = breakinfo[breakno].len;
25454 info->type = breakinfo[breakno].type;
25455 val = arch_install_hw_breakpoint(bp);
25456@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25457 case 'k':
25458 /* clear the trace bit */
25459 linux_regs->flags &= ~X86_EFLAGS_TF;
25460- atomic_set(&kgdb_cpu_doing_single_step, -1);
25461+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25462
25463 /* set the trace bit if we're stepping */
25464 if (remcomInBuffer[0] == 's') {
25465 linux_regs->flags |= X86_EFLAGS_TF;
25466- atomic_set(&kgdb_cpu_doing_single_step,
25467+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25468 raw_smp_processor_id());
25469 }
25470
25471@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25472
25473 switch (cmd) {
25474 case DIE_DEBUG:
25475- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25476+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25477 if (user_mode(regs))
25478 return single_step_cont(regs, args);
25479 break;
25480@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25481 #endif /* CONFIG_DEBUG_RODATA */
25482
25483 bpt->type = BP_BREAKPOINT;
25484- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25485+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25486 BREAK_INSTR_SIZE);
25487 if (err)
25488 return err;
25489- err = probe_kernel_write((char *)bpt->bpt_addr,
25490+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25491 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25492 #ifdef CONFIG_DEBUG_RODATA
25493 if (!err)
25494@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25495 return -EBUSY;
25496 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25497 BREAK_INSTR_SIZE);
25498- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25499+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25500 if (err)
25501 return err;
25502 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25503@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25504 if (mutex_is_locked(&text_mutex))
25505 goto knl_write;
25506 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25507- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25508+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25509 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25510 goto knl_write;
25511 return err;
25512 knl_write:
25513 #endif /* CONFIG_DEBUG_RODATA */
25514- return probe_kernel_write((char *)bpt->bpt_addr,
25515+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25516 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25517 }
25518
25519diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25520index 4e3d5a9..03fffd8 100644
25521--- a/arch/x86/kernel/kprobes/core.c
25522+++ b/arch/x86/kernel/kprobes/core.c
25523@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25524 s32 raddr;
25525 } __packed *insn;
25526
25527- insn = (struct __arch_relative_insn *)from;
25528+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25529+
25530+ pax_open_kernel();
25531 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25532 insn->op = op;
25533+ pax_close_kernel();
25534 }
25535
25536 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25537@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25538 kprobe_opcode_t opcode;
25539 kprobe_opcode_t *orig_opcodes = opcodes;
25540
25541- if (search_exception_tables((unsigned long)opcodes))
25542+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25543 return 0; /* Page fault may occur on this address. */
25544
25545 retry:
25546@@ -260,12 +263,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25547 * Fortunately, we know that the original code is the ideal 5-byte
25548 * long NOP.
25549 */
25550- memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25551+ memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25552 if (faddr)
25553 memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
25554 else
25555 buf[0] = kp->opcode;
25556- return (unsigned long)buf;
25557+ return ktva_ktla((unsigned long)buf);
25558 }
25559
25560 /*
25561@@ -364,7 +367,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25562 /* Another subsystem puts a breakpoint, failed to recover */
25563 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25564 return 0;
25565+ pax_open_kernel();
25566 memcpy(dest, insn.kaddr, insn.length);
25567+ pax_close_kernel();
25568
25569 #ifdef CONFIG_X86_64
25570 if (insn_rip_relative(&insn)) {
25571@@ -391,7 +396,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25572 return 0;
25573 }
25574 disp = (u8 *) dest + insn_offset_displacement(&insn);
25575+ pax_open_kernel();
25576 *(s32 *) disp = (s32) newdisp;
25577+ pax_close_kernel();
25578 }
25579 #endif
25580 return insn.length;
25581@@ -533,7 +540,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25582 * nor set current_kprobe, because it doesn't use single
25583 * stepping.
25584 */
25585- regs->ip = (unsigned long)p->ainsn.insn;
25586+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25587 preempt_enable_no_resched();
25588 return;
25589 }
25590@@ -550,9 +557,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25591 regs->flags &= ~X86_EFLAGS_IF;
25592 /* single step inline if the instruction is an int3 */
25593 if (p->opcode == BREAKPOINT_INSTRUCTION)
25594- regs->ip = (unsigned long)p->addr;
25595+ regs->ip = ktla_ktva((unsigned long)p->addr);
25596 else
25597- regs->ip = (unsigned long)p->ainsn.insn;
25598+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25599 }
25600 NOKPROBE_SYMBOL(setup_singlestep);
25601
25602@@ -602,7 +609,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25603 struct kprobe *p;
25604 struct kprobe_ctlblk *kcb;
25605
25606- if (user_mode_vm(regs))
25607+ if (user_mode(regs))
25608 return 0;
25609
25610 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25611@@ -637,7 +644,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25612 setup_singlestep(p, regs, kcb, 0);
25613 return 1;
25614 }
25615- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25616+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25617 /*
25618 * The breakpoint instruction was removed right
25619 * after we hit it. Another cpu has removed
25620@@ -684,6 +691,9 @@ static void __used kretprobe_trampoline_holder(void)
25621 " movq %rax, 152(%rsp)\n"
25622 RESTORE_REGS_STRING
25623 " popfq\n"
25624+#ifdef KERNEXEC_PLUGIN
25625+ " btsq $63,(%rsp)\n"
25626+#endif
25627 #else
25628 " pushf\n"
25629 SAVE_REGS_STRING
25630@@ -824,7 +834,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25631 struct kprobe_ctlblk *kcb)
25632 {
25633 unsigned long *tos = stack_addr(regs);
25634- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25635+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25636 unsigned long orig_ip = (unsigned long)p->addr;
25637 kprobe_opcode_t *insn = p->ainsn.insn;
25638
25639@@ -1007,7 +1017,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25640 struct die_args *args = data;
25641 int ret = NOTIFY_DONE;
25642
25643- if (args->regs && user_mode_vm(args->regs))
25644+ if (args->regs && user_mode(args->regs))
25645 return ret;
25646
25647 if (val == DIE_GPF) {
25648diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25649index 7b3b9d1..e2478b91 100644
25650--- a/arch/x86/kernel/kprobes/opt.c
25651+++ b/arch/x86/kernel/kprobes/opt.c
25652@@ -79,6 +79,7 @@ found:
25653 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25654 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25655 {
25656+ pax_open_kernel();
25657 #ifdef CONFIG_X86_64
25658 *addr++ = 0x48;
25659 *addr++ = 0xbf;
25660@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25661 *addr++ = 0xb8;
25662 #endif
25663 *(unsigned long *)addr = val;
25664+ pax_close_kernel();
25665 }
25666
25667 asm (
25668@@ -342,7 +344,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25669 * Verify if the address gap is in 2GB range, because this uses
25670 * a relative jump.
25671 */
25672- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25673+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25674 if (abs(rel) > 0x7fffffff) {
25675 __arch_remove_optimized_kprobe(op, 0);
25676 return -ERANGE;
25677@@ -359,16 +361,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25678 op->optinsn.size = ret;
25679
25680 /* Copy arch-dep-instance from template */
25681- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25682+ pax_open_kernel();
25683+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25684+ pax_close_kernel();
25685
25686 /* Set probe information */
25687 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25688
25689 /* Set probe function call */
25690- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25691+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25692
25693 /* Set returning jmp instruction at the tail of out-of-line buffer */
25694- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25695+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25696 (u8 *)op->kp.addr + op->optinsn.size);
25697
25698 flush_icache_range((unsigned long) buf,
25699@@ -393,7 +397,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25700 WARN_ON(kprobe_disabled(&op->kp));
25701
25702 /* Backup instructions which will be replaced by jump address */
25703- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25704+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25705 RELATIVE_ADDR_SIZE);
25706
25707 insn_buf[0] = RELATIVEJUMP_OPCODE;
25708@@ -441,7 +445,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25709 /* This kprobe is really able to run optimized path. */
25710 op = container_of(p, struct optimized_kprobe, kp);
25711 /* Detour through copied instructions */
25712- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25713+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25714 if (!reenter)
25715 reset_current_kprobe();
25716 preempt_enable_no_resched();
25717diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25718index c2bedae..25e7ab60 100644
25719--- a/arch/x86/kernel/ksysfs.c
25720+++ b/arch/x86/kernel/ksysfs.c
25721@@ -184,7 +184,7 @@ out:
25722
25723 static struct kobj_attribute type_attr = __ATTR_RO(type);
25724
25725-static struct bin_attribute data_attr = {
25726+static bin_attribute_no_const data_attr __read_only = {
25727 .attr = {
25728 .name = "data",
25729 .mode = S_IRUGO,
25730diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25731index c37886d..d851d32 100644
25732--- a/arch/x86/kernel/ldt.c
25733+++ b/arch/x86/kernel/ldt.c
25734@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25735 if (reload) {
25736 #ifdef CONFIG_SMP
25737 preempt_disable();
25738- load_LDT(pc);
25739+ load_LDT_nolock(pc);
25740 if (!cpumask_equal(mm_cpumask(current->mm),
25741 cpumask_of(smp_processor_id())))
25742 smp_call_function(flush_ldt, current->mm, 1);
25743 preempt_enable();
25744 #else
25745- load_LDT(pc);
25746+ load_LDT_nolock(pc);
25747 #endif
25748 }
25749 if (oldsize) {
25750@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25751 return err;
25752
25753 for (i = 0; i < old->size; i++)
25754- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25755+ write_ldt_entry(new->ldt, i, old->ldt + i);
25756 return 0;
25757 }
25758
25759@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25760 retval = copy_ldt(&mm->context, &old_mm->context);
25761 mutex_unlock(&old_mm->context.lock);
25762 }
25763+
25764+ if (tsk == current) {
25765+ mm->context.vdso = 0;
25766+
25767+#ifdef CONFIG_X86_32
25768+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25769+ mm->context.user_cs_base = 0UL;
25770+ mm->context.user_cs_limit = ~0UL;
25771+
25772+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25773+ cpus_clear(mm->context.cpu_user_cs_mask);
25774+#endif
25775+
25776+#endif
25777+#endif
25778+
25779+ }
25780+
25781 return retval;
25782 }
25783
25784@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25785 }
25786 }
25787
25788+#ifdef CONFIG_PAX_SEGMEXEC
25789+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25790+ error = -EINVAL;
25791+ goto out_unlock;
25792+ }
25793+#endif
25794+
25795 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25796 error = -EINVAL;
25797 goto out_unlock;
25798diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
25799index ff3c3101d..d7c0cd8 100644
25800--- a/arch/x86/kernel/livepatch.c
25801+++ b/arch/x86/kernel/livepatch.c
25802@@ -41,9 +41,10 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25803 int ret, numpages, size = 4;
25804 bool readonly;
25805 unsigned long val;
25806- unsigned long core = (unsigned long)mod->module_core;
25807- unsigned long core_ro_size = mod->core_ro_size;
25808- unsigned long core_size = mod->core_size;
25809+ unsigned long core_rx = (unsigned long)mod->module_core_rx;
25810+ unsigned long core_rw = (unsigned long)mod->module_core_rw;
25811+ unsigned long core_size_rx = mod->core_size_rx;
25812+ unsigned long core_size_rw = mod->core_size_rw;
25813
25814 switch (type) {
25815 case R_X86_64_NONE:
25816@@ -66,11 +67,12 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25817 return -EINVAL;
25818 }
25819
25820- if (loc < core || loc >= core + core_size)
25821+ if ((loc < core_rx || loc >= core_rx + core_size_rx) &&
25822+ (loc < core_rw || loc >= core_rw + core_size_rw))
25823 /* loc does not point to any symbol inside the module */
25824 return -EINVAL;
25825
25826- if (loc < core + core_ro_size)
25827+ if (loc < core_rx + core_size_rx)
25828 readonly = true;
25829 else
25830 readonly = false;
25831diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25832index 469b23d..5449cfe 100644
25833--- a/arch/x86/kernel/machine_kexec_32.c
25834+++ b/arch/x86/kernel/machine_kexec_32.c
25835@@ -26,7 +26,7 @@
25836 #include <asm/cacheflush.h>
25837 #include <asm/debugreg.h>
25838
25839-static void set_idt(void *newidt, __u16 limit)
25840+static void set_idt(struct desc_struct *newidt, __u16 limit)
25841 {
25842 struct desc_ptr curidt;
25843
25844@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25845 }
25846
25847
25848-static void set_gdt(void *newgdt, __u16 limit)
25849+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25850 {
25851 struct desc_ptr curgdt;
25852
25853@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25854 }
25855
25856 control_page = page_address(image->control_code_page);
25857- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25858+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25859
25860 relocate_kernel_ptr = control_page;
25861 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25862diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25863index 94ea120..4154cea 100644
25864--- a/arch/x86/kernel/mcount_64.S
25865+++ b/arch/x86/kernel/mcount_64.S
25866@@ -7,7 +7,7 @@
25867 #include <linux/linkage.h>
25868 #include <asm/ptrace.h>
25869 #include <asm/ftrace.h>
25870-
25871+#include <asm/alternative-asm.h>
25872
25873 .code64
25874 .section .entry.text, "ax"
25875@@ -148,8 +148,9 @@
25876 #ifdef CONFIG_DYNAMIC_FTRACE
25877
25878 ENTRY(function_hook)
25879+ pax_force_retaddr
25880 retq
25881-END(function_hook)
25882+ENDPROC(function_hook)
25883
25884 ENTRY(ftrace_caller)
25885 /* save_mcount_regs fills in first two parameters */
25886@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25887 #endif
25888
25889 GLOBAL(ftrace_stub)
25890+ pax_force_retaddr
25891 retq
25892-END(ftrace_caller)
25893+ENDPROC(ftrace_caller)
25894
25895 ENTRY(ftrace_regs_caller)
25896 /* Save the current flags before any operations that can change them */
25897@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25898
25899 jmp ftrace_return
25900
25901-END(ftrace_regs_caller)
25902+ENDPROC(ftrace_regs_caller)
25903
25904
25905 #else /* ! CONFIG_DYNAMIC_FTRACE */
25906@@ -272,18 +274,20 @@ fgraph_trace:
25907 #endif
25908
25909 GLOBAL(ftrace_stub)
25910+ pax_force_retaddr
25911 retq
25912
25913 trace:
25914 /* save_mcount_regs fills in first two parameters */
25915 save_mcount_regs
25916
25917+ pax_force_fptr ftrace_trace_function
25918 call *ftrace_trace_function
25919
25920 restore_mcount_regs
25921
25922 jmp fgraph_trace
25923-END(function_hook)
25924+ENDPROC(function_hook)
25925 #endif /* CONFIG_DYNAMIC_FTRACE */
25926 #endif /* CONFIG_FUNCTION_TRACER */
25927
25928@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25929
25930 restore_mcount_regs
25931
25932+ pax_force_retaddr
25933 retq
25934-END(ftrace_graph_caller)
25935+ENDPROC(ftrace_graph_caller)
25936
25937 GLOBAL(return_to_handler)
25938 subq $24, %rsp
25939@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25940 movq 8(%rsp), %rdx
25941 movq (%rsp), %rax
25942 addq $24, %rsp
25943+ pax_force_fptr %rdi
25944 jmp *%rdi
25945+ENDPROC(return_to_handler)
25946 #endif
25947diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25948index d1ac80b..f593701 100644
25949--- a/arch/x86/kernel/module.c
25950+++ b/arch/x86/kernel/module.c
25951@@ -82,17 +82,17 @@ static unsigned long int get_module_load_offset(void)
25952 }
25953 #endif
25954
25955-void *module_alloc(unsigned long size)
25956+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25957 {
25958 void *p;
25959
25960- if (PAGE_ALIGN(size) > MODULES_LEN)
25961+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25962 return NULL;
25963
25964 p = __vmalloc_node_range(size, MODULE_ALIGN,
25965 MODULES_VADDR + get_module_load_offset(),
25966- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25967- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
25968+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25969+ prot, 0, NUMA_NO_NODE,
25970 __builtin_return_address(0));
25971 if (p && (kasan_module_alloc(p, size) < 0)) {
25972 vfree(p);
25973@@ -102,6 +102,51 @@ void *module_alloc(unsigned long size)
25974 return p;
25975 }
25976
25977+void *module_alloc(unsigned long size)
25978+{
25979+
25980+#ifdef CONFIG_PAX_KERNEXEC
25981+ return __module_alloc(size, PAGE_KERNEL);
25982+#else
25983+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25984+#endif
25985+
25986+}
25987+
25988+#ifdef CONFIG_PAX_KERNEXEC
25989+#ifdef CONFIG_X86_32
25990+void *module_alloc_exec(unsigned long size)
25991+{
25992+ struct vm_struct *area;
25993+
25994+ if (size == 0)
25995+ return NULL;
25996+
25997+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25998+return area ? area->addr : NULL;
25999+}
26000+EXPORT_SYMBOL(module_alloc_exec);
26001+
26002+void module_memfree_exec(void *module_region)
26003+{
26004+ vunmap(module_region);
26005+}
26006+EXPORT_SYMBOL(module_memfree_exec);
26007+#else
26008+void module_memfree_exec(void *module_region)
26009+{
26010+ module_memfree(module_region);
26011+}
26012+EXPORT_SYMBOL(module_memfree_exec);
26013+
26014+void *module_alloc_exec(unsigned long size)
26015+{
26016+ return __module_alloc(size, PAGE_KERNEL_RX);
26017+}
26018+EXPORT_SYMBOL(module_alloc_exec);
26019+#endif
26020+#endif
26021+
26022 #ifdef CONFIG_X86_32
26023 int apply_relocate(Elf32_Shdr *sechdrs,
26024 const char *strtab,
26025@@ -112,14 +157,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26026 unsigned int i;
26027 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26028 Elf32_Sym *sym;
26029- uint32_t *location;
26030+ uint32_t *plocation, location;
26031
26032 DEBUGP("Applying relocate section %u to %u\n",
26033 relsec, sechdrs[relsec].sh_info);
26034 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26035 /* This is where to make the change */
26036- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26037- + rel[i].r_offset;
26038+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26039+ location = (uint32_t)plocation;
26040+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26041+ plocation = ktla_ktva((void *)plocation);
26042 /* This is the symbol it is referring to. Note that all
26043 undefined symbols have been resolved. */
26044 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26045@@ -128,11 +175,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26046 switch (ELF32_R_TYPE(rel[i].r_info)) {
26047 case R_386_32:
26048 /* We add the value into the location given */
26049- *location += sym->st_value;
26050+ pax_open_kernel();
26051+ *plocation += sym->st_value;
26052+ pax_close_kernel();
26053 break;
26054 case R_386_PC32:
26055 /* Add the value, subtract its position */
26056- *location += sym->st_value - (uint32_t)location;
26057+ pax_open_kernel();
26058+ *plocation += sym->st_value - location;
26059+ pax_close_kernel();
26060 break;
26061 default:
26062 pr_err("%s: Unknown relocation: %u\n",
26063@@ -177,21 +228,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26064 case R_X86_64_NONE:
26065 break;
26066 case R_X86_64_64:
26067+ pax_open_kernel();
26068 *(u64 *)loc = val;
26069+ pax_close_kernel();
26070 break;
26071 case R_X86_64_32:
26072+ pax_open_kernel();
26073 *(u32 *)loc = val;
26074+ pax_close_kernel();
26075 if (val != *(u32 *)loc)
26076 goto overflow;
26077 break;
26078 case R_X86_64_32S:
26079+ pax_open_kernel();
26080 *(s32 *)loc = val;
26081+ pax_close_kernel();
26082 if ((s64)val != *(s32 *)loc)
26083 goto overflow;
26084 break;
26085 case R_X86_64_PC32:
26086 val -= (u64)loc;
26087+ pax_open_kernel();
26088 *(u32 *)loc = val;
26089+ pax_close_kernel();
26090+
26091 #if 0
26092 if ((s64)val != *(s32 *)loc)
26093 goto overflow;
26094diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26095index 113e707..0a690e1 100644
26096--- a/arch/x86/kernel/msr.c
26097+++ b/arch/x86/kernel/msr.c
26098@@ -39,6 +39,7 @@
26099 #include <linux/notifier.h>
26100 #include <linux/uaccess.h>
26101 #include <linux/gfp.h>
26102+#include <linux/grsecurity.h>
26103
26104 #include <asm/processor.h>
26105 #include <asm/msr.h>
26106@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26107 int err = 0;
26108 ssize_t bytes = 0;
26109
26110+#ifdef CONFIG_GRKERNSEC_KMEM
26111+ gr_handle_msr_write();
26112+ return -EPERM;
26113+#endif
26114+
26115 if (count % 8)
26116 return -EINVAL; /* Invalid chunk size */
26117
26118@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26119 err = -EBADF;
26120 break;
26121 }
26122+#ifdef CONFIG_GRKERNSEC_KMEM
26123+ gr_handle_msr_write();
26124+ return -EPERM;
26125+#endif
26126 if (copy_from_user(&regs, uregs, sizeof regs)) {
26127 err = -EFAULT;
26128 break;
26129@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26130 return notifier_from_errno(err);
26131 }
26132
26133-static struct notifier_block __refdata msr_class_cpu_notifier = {
26134+static struct notifier_block msr_class_cpu_notifier = {
26135 .notifier_call = msr_class_cpu_callback,
26136 };
26137
26138diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26139index c3e985d..110a36a 100644
26140--- a/arch/x86/kernel/nmi.c
26141+++ b/arch/x86/kernel/nmi.c
26142@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26143
26144 static void nmi_max_handler(struct irq_work *w)
26145 {
26146- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26147+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26148 int remainder_ns, decimal_msecs;
26149- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26150+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26151
26152 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26153 decimal_msecs = remainder_ns / 1000;
26154
26155 printk_ratelimited(KERN_INFO
26156 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26157- a->handler, whole_msecs, decimal_msecs);
26158+ n->action->handler, whole_msecs, decimal_msecs);
26159 }
26160
26161 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26162@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26163 delta = sched_clock() - delta;
26164 trace_nmi_handler(a->handler, (int)delta, thishandled);
26165
26166- if (delta < nmi_longest_ns || delta < a->max_duration)
26167+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26168 continue;
26169
26170- a->max_duration = delta;
26171- irq_work_queue(&a->irq_work);
26172+ a->work->max_duration = delta;
26173+ irq_work_queue(&a->work->irq_work);
26174 }
26175
26176 rcu_read_unlock();
26177@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26178 }
26179 NOKPROBE_SYMBOL(nmi_handle);
26180
26181-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26182+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26183 {
26184 struct nmi_desc *desc = nmi_to_desc(type);
26185 unsigned long flags;
26186@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26187 if (!action->handler)
26188 return -EINVAL;
26189
26190- init_irq_work(&action->irq_work, nmi_max_handler);
26191+ action->work->action = action;
26192+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26193
26194 spin_lock_irqsave(&desc->lock, flags);
26195
26196@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26197 * event confuses some handlers (kdump uses this flag)
26198 */
26199 if (action->flags & NMI_FLAG_FIRST)
26200- list_add_rcu(&action->list, &desc->head);
26201+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26202 else
26203- list_add_tail_rcu(&action->list, &desc->head);
26204+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26205
26206 spin_unlock_irqrestore(&desc->lock, flags);
26207 return 0;
26208@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26209 if (!strcmp(n->name, name)) {
26210 WARN(in_nmi(),
26211 "Trying to free NMI (%s) from NMI context!\n", n->name);
26212- list_del_rcu(&n->list);
26213+ pax_list_del_rcu((struct list_head *)&n->list);
26214 break;
26215 }
26216 }
26217@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26218 dotraplinkage notrace void
26219 do_nmi(struct pt_regs *regs, long error_code)
26220 {
26221+
26222+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26223+ if (!user_mode(regs)) {
26224+ unsigned long cs = regs->cs & 0xFFFF;
26225+ unsigned long ip = ktva_ktla(regs->ip);
26226+
26227+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26228+ regs->ip = ip;
26229+ }
26230+#endif
26231+
26232 nmi_nesting_preprocess(regs);
26233
26234 nmi_enter();
26235diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26236index 6d9582e..f746287 100644
26237--- a/arch/x86/kernel/nmi_selftest.c
26238+++ b/arch/x86/kernel/nmi_selftest.c
26239@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26240 {
26241 /* trap all the unknown NMIs we may generate */
26242 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26243- __initdata);
26244+ __initconst);
26245 }
26246
26247 static void __init cleanup_nmi_testsuite(void)
26248@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26249 unsigned long timeout;
26250
26251 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26252- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26253+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26254 nmi_fail = FAILURE;
26255 return;
26256 }
26257diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26258index bbb6c73..24a58ef 100644
26259--- a/arch/x86/kernel/paravirt-spinlocks.c
26260+++ b/arch/x86/kernel/paravirt-spinlocks.c
26261@@ -8,7 +8,7 @@
26262
26263 #include <asm/paravirt.h>
26264
26265-struct pv_lock_ops pv_lock_ops = {
26266+struct pv_lock_ops pv_lock_ops __read_only = {
26267 #ifdef CONFIG_SMP
26268 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26269 .unlock_kick = paravirt_nop,
26270diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26271index 548d25f..f8fb99c 100644
26272--- a/arch/x86/kernel/paravirt.c
26273+++ b/arch/x86/kernel/paravirt.c
26274@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26275 {
26276 return x;
26277 }
26278+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26279+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26280+#endif
26281
26282 void __init default_banner(void)
26283 {
26284@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26285
26286 if (opfunc == NULL)
26287 /* If there's no function, patch it with a ud2a (BUG) */
26288- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26289- else if (opfunc == _paravirt_nop)
26290+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26291+ else if (opfunc == (void *)_paravirt_nop)
26292 /* If the operation is a nop, then nop the callsite */
26293 ret = paravirt_patch_nop();
26294
26295 /* identity functions just return their single argument */
26296- else if (opfunc == _paravirt_ident_32)
26297+ else if (opfunc == (void *)_paravirt_ident_32)
26298 ret = paravirt_patch_ident_32(insnbuf, len);
26299- else if (opfunc == _paravirt_ident_64)
26300+ else if (opfunc == (void *)_paravirt_ident_64)
26301 ret = paravirt_patch_ident_64(insnbuf, len);
26302+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26303+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26304+ ret = paravirt_patch_ident_64(insnbuf, len);
26305+#endif
26306
26307 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26308 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26309@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26310 if (insn_len > len || start == NULL)
26311 insn_len = len;
26312 else
26313- memcpy(insnbuf, start, insn_len);
26314+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26315
26316 return insn_len;
26317 }
26318@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26319 return this_cpu_read(paravirt_lazy_mode);
26320 }
26321
26322-struct pv_info pv_info = {
26323+struct pv_info pv_info __read_only = {
26324 .name = "bare hardware",
26325 .paravirt_enabled = 0,
26326 .kernel_rpl = 0,
26327@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26328 #endif
26329 };
26330
26331-struct pv_init_ops pv_init_ops = {
26332+struct pv_init_ops pv_init_ops __read_only = {
26333 .patch = native_patch,
26334 };
26335
26336-struct pv_time_ops pv_time_ops = {
26337+struct pv_time_ops pv_time_ops __read_only = {
26338 .sched_clock = native_sched_clock,
26339 .steal_clock = native_steal_clock,
26340 };
26341
26342-__visible struct pv_irq_ops pv_irq_ops = {
26343+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26344 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26345 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26346 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26347@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26348 #endif
26349 };
26350
26351-__visible struct pv_cpu_ops pv_cpu_ops = {
26352+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26353 .cpuid = native_cpuid,
26354 .get_debugreg = native_get_debugreg,
26355 .set_debugreg = native_set_debugreg,
26356@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26357 NOKPROBE_SYMBOL(native_set_debugreg);
26358 NOKPROBE_SYMBOL(native_load_idt);
26359
26360-struct pv_apic_ops pv_apic_ops = {
26361+struct pv_apic_ops pv_apic_ops __read_only= {
26362 #ifdef CONFIG_X86_LOCAL_APIC
26363 .startup_ipi_hook = paravirt_nop,
26364 #endif
26365 };
26366
26367-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26368+#ifdef CONFIG_X86_32
26369+#ifdef CONFIG_X86_PAE
26370+/* 64-bit pagetable entries */
26371+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26372+#else
26373 /* 32-bit pagetable entries */
26374 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26375+#endif
26376 #else
26377 /* 64-bit pagetable entries */
26378 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26379 #endif
26380
26381-struct pv_mmu_ops pv_mmu_ops = {
26382+struct pv_mmu_ops pv_mmu_ops __read_only = {
26383
26384 .read_cr2 = native_read_cr2,
26385 .write_cr2 = native_write_cr2,
26386@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26387 .make_pud = PTE_IDENT,
26388
26389 .set_pgd = native_set_pgd,
26390+ .set_pgd_batched = native_set_pgd_batched,
26391 #endif
26392 #endif /* PAGETABLE_LEVELS >= 3 */
26393
26394@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26395 },
26396
26397 .set_fixmap = native_set_fixmap,
26398+
26399+#ifdef CONFIG_PAX_KERNEXEC
26400+ .pax_open_kernel = native_pax_open_kernel,
26401+ .pax_close_kernel = native_pax_close_kernel,
26402+#endif
26403+
26404 };
26405
26406 EXPORT_SYMBOL_GPL(pv_time_ops);
26407diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26408index a1da673..b6f5831 100644
26409--- a/arch/x86/kernel/paravirt_patch_64.c
26410+++ b/arch/x86/kernel/paravirt_patch_64.c
26411@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26412 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26413 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26414 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26415+
26416+#ifndef CONFIG_PAX_MEMORY_UDEREF
26417 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26418+#endif
26419+
26420 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26421 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26422
26423@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26424 PATCH_SITE(pv_mmu_ops, read_cr3);
26425 PATCH_SITE(pv_mmu_ops, write_cr3);
26426 PATCH_SITE(pv_cpu_ops, clts);
26427+
26428+#ifndef CONFIG_PAX_MEMORY_UDEREF
26429 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26430+#endif
26431+
26432 PATCH_SITE(pv_cpu_ops, wbinvd);
26433
26434 patch_site:
26435diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26436index 0497f71..7186c0d 100644
26437--- a/arch/x86/kernel/pci-calgary_64.c
26438+++ b/arch/x86/kernel/pci-calgary_64.c
26439@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26440 tce_space = be64_to_cpu(readq(target));
26441 tce_space = tce_space & TAR_SW_BITS;
26442
26443- tce_space = tce_space & (~specified_table_size);
26444+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26445 info->tce_space = (u64 *)__va(tce_space);
26446 }
26447 }
26448diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26449index 35ccf75..7a15747 100644
26450--- a/arch/x86/kernel/pci-iommu_table.c
26451+++ b/arch/x86/kernel/pci-iommu_table.c
26452@@ -2,7 +2,7 @@
26453 #include <asm/iommu_table.h>
26454 #include <linux/string.h>
26455 #include <linux/kallsyms.h>
26456-
26457+#include <linux/sched.h>
26458
26459 #define DEBUG 1
26460
26461diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26462index 77dd0ad..9ec4723 100644
26463--- a/arch/x86/kernel/pci-swiotlb.c
26464+++ b/arch/x86/kernel/pci-swiotlb.c
26465@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26466 struct dma_attrs *attrs)
26467 {
26468 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26469- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26470+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26471 else
26472 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26473 }
26474diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26475index a388bb8..97064ad 100644
26476--- a/arch/x86/kernel/process.c
26477+++ b/arch/x86/kernel/process.c
26478@@ -38,7 +38,8 @@
26479 * section. Since TSS's are completely CPU-local, we want them
26480 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26481 */
26482-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26483+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26484+EXPORT_SYMBOL(init_tss);
26485
26486 #ifdef CONFIG_X86_64
26487 static DEFINE_PER_CPU(unsigned char, is_idle);
26488@@ -96,7 +97,7 @@ void arch_task_cache_init(void)
26489 task_xstate_cachep =
26490 kmem_cache_create("task_xstate", xstate_size,
26491 __alignof__(union thread_xstate),
26492- SLAB_PANIC | SLAB_NOTRACK, NULL);
26493+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26494 setup_xstate_comp();
26495 }
26496
26497@@ -110,7 +111,7 @@ void exit_thread(void)
26498 unsigned long *bp = t->io_bitmap_ptr;
26499
26500 if (bp) {
26501- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26502+ struct tss_struct *tss = init_tss + get_cpu();
26503
26504 t->io_bitmap_ptr = NULL;
26505 clear_thread_flag(TIF_IO_BITMAP);
26506@@ -130,6 +131,9 @@ void flush_thread(void)
26507 {
26508 struct task_struct *tsk = current;
26509
26510+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26511+ loadsegment(gs, 0);
26512+#endif
26513 flush_ptrace_hw_breakpoint(tsk);
26514 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26515 drop_init_fpu(tsk);
26516@@ -276,7 +280,7 @@ static void __exit_idle(void)
26517 void exit_idle(void)
26518 {
26519 /* idle loop has pid 0 */
26520- if (current->pid)
26521+ if (task_pid_nr(current))
26522 return;
26523 __exit_idle();
26524 }
26525@@ -329,7 +333,7 @@ bool xen_set_default_idle(void)
26526 return ret;
26527 }
26528 #endif
26529-void stop_this_cpu(void *dummy)
26530+__noreturn void stop_this_cpu(void *dummy)
26531 {
26532 local_irq_disable();
26533 /*
26534@@ -508,16 +512,37 @@ static int __init idle_setup(char *str)
26535 }
26536 early_param("idle", idle_setup);
26537
26538-unsigned long arch_align_stack(unsigned long sp)
26539+#ifdef CONFIG_PAX_RANDKSTACK
26540+void pax_randomize_kstack(struct pt_regs *regs)
26541 {
26542- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26543- sp -= get_random_int() % 8192;
26544- return sp & ~0xf;
26545-}
26546+ struct thread_struct *thread = &current->thread;
26547+ unsigned long time;
26548
26549-unsigned long arch_randomize_brk(struct mm_struct *mm)
26550-{
26551- unsigned long range_end = mm->brk + 0x02000000;
26552- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26553-}
26554+ if (!randomize_va_space)
26555+ return;
26556+
26557+ if (v8086_mode(regs))
26558+ return;
26559
26560+ rdtscl(time);
26561+
26562+ /* P4 seems to return a 0 LSB, ignore it */
26563+#ifdef CONFIG_MPENTIUM4
26564+ time &= 0x3EUL;
26565+ time <<= 2;
26566+#elif defined(CONFIG_X86_64)
26567+ time &= 0xFUL;
26568+ time <<= 4;
26569+#else
26570+ time &= 0x1FUL;
26571+ time <<= 3;
26572+#endif
26573+
26574+ thread->sp0 ^= time;
26575+ load_sp0(init_tss + smp_processor_id(), thread);
26576+
26577+#ifdef CONFIG_X86_64
26578+ this_cpu_write(kernel_stack, thread->sp0);
26579+#endif
26580+}
26581+#endif
26582diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26583index 603c4f9..3a105d7 100644
26584--- a/arch/x86/kernel/process_32.c
26585+++ b/arch/x86/kernel/process_32.c
26586@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26587 unsigned long thread_saved_pc(struct task_struct *tsk)
26588 {
26589 return ((unsigned long *)tsk->thread.sp)[3];
26590+//XXX return tsk->thread.eip;
26591 }
26592
26593 void __show_regs(struct pt_regs *regs, int all)
26594@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26595 unsigned long sp;
26596 unsigned short ss, gs;
26597
26598- if (user_mode_vm(regs)) {
26599+ if (user_mode(regs)) {
26600 sp = regs->sp;
26601 ss = regs->ss & 0xffff;
26602- gs = get_user_gs(regs);
26603 } else {
26604 sp = kernel_stack_pointer(regs);
26605 savesegment(ss, ss);
26606- savesegment(gs, gs);
26607 }
26608+ gs = get_user_gs(regs);
26609
26610 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26611 (u16)regs->cs, regs->ip, regs->flags,
26612- smp_processor_id());
26613+ raw_smp_processor_id());
26614 print_symbol("EIP is at %s\n", regs->ip);
26615
26616 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26617@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26618 int copy_thread(unsigned long clone_flags, unsigned long sp,
26619 unsigned long arg, struct task_struct *p)
26620 {
26621- struct pt_regs *childregs = task_pt_regs(p);
26622+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26623 struct task_struct *tsk;
26624 int err;
26625
26626 p->thread.sp = (unsigned long) childregs;
26627 p->thread.sp0 = (unsigned long) (childregs+1);
26628+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26629 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26630
26631 if (unlikely(p->flags & PF_KTHREAD)) {
26632 /* kernel thread */
26633 memset(childregs, 0, sizeof(struct pt_regs));
26634 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26635- task_user_gs(p) = __KERNEL_STACK_CANARY;
26636- childregs->ds = __USER_DS;
26637- childregs->es = __USER_DS;
26638+ savesegment(gs, childregs->gs);
26639+ childregs->ds = __KERNEL_DS;
26640+ childregs->es = __KERNEL_DS;
26641 childregs->fs = __KERNEL_PERCPU;
26642 childregs->bx = sp; /* function */
26643 childregs->bp = arg;
26644@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26645 struct thread_struct *prev = &prev_p->thread,
26646 *next = &next_p->thread;
26647 int cpu = smp_processor_id();
26648- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26649+ struct tss_struct *tss = init_tss + cpu;
26650 fpu_switch_t fpu;
26651
26652 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26653@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26654 */
26655 lazy_save_gs(prev->gs);
26656
26657+#ifdef CONFIG_PAX_MEMORY_UDEREF
26658+ __set_fs(task_thread_info(next_p)->addr_limit);
26659+#endif
26660+
26661 /*
26662 * Load the per-thread Thread-Local Storage descriptor.
26663 */
26664@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26665 */
26666 arch_end_context_switch(next_p);
26667
26668- this_cpu_write(kernel_stack,
26669- (unsigned long)task_stack_page(next_p) +
26670- THREAD_SIZE - KERNEL_STACK_OFFSET);
26671+ this_cpu_write(current_task, next_p);
26672+ this_cpu_write(current_tinfo, &next_p->tinfo);
26673+ this_cpu_write(kernel_stack, next->sp0);
26674
26675 /*
26676 * Restore %gs if needed (which is common)
26677@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26678
26679 switch_fpu_finish(next_p, fpu);
26680
26681- this_cpu_write(current_task, next_p);
26682-
26683 return prev_p;
26684 }
26685
26686@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26687 } while (count++ < 16);
26688 return 0;
26689 }
26690-
26691diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26692index 67fcc43..0d2c630 100644
26693--- a/arch/x86/kernel/process_64.c
26694+++ b/arch/x86/kernel/process_64.c
26695@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26696 struct pt_regs *childregs;
26697 struct task_struct *me = current;
26698
26699- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26700+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26701 childregs = task_pt_regs(p);
26702 p->thread.sp = (unsigned long) childregs;
26703 p->thread.usersp = me->thread.usersp;
26704+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26705 set_tsk_thread_flag(p, TIF_FORK);
26706 p->thread.io_bitmap_ptr = NULL;
26707
26708@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26709 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26710 savesegment(es, p->thread.es);
26711 savesegment(ds, p->thread.ds);
26712+ savesegment(ss, p->thread.ss);
26713+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26714 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26715
26716 if (unlikely(p->flags & PF_KTHREAD)) {
26717@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26718 struct thread_struct *prev = &prev_p->thread;
26719 struct thread_struct *next = &next_p->thread;
26720 int cpu = smp_processor_id();
26721- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26722+ struct tss_struct *tss = init_tss + cpu;
26723 unsigned fsindex, gsindex;
26724 fpu_switch_t fpu;
26725
26726@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26727 if (unlikely(next->ds | prev->ds))
26728 loadsegment(ds, next->ds);
26729
26730+ savesegment(ss, prev->ss);
26731+ if (unlikely(next->ss != prev->ss))
26732+ loadsegment(ss, next->ss);
26733+
26734 /*
26735 * Switch FS and GS.
26736 *
26737@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26738 prev->usersp = this_cpu_read(old_rsp);
26739 this_cpu_write(old_rsp, next->usersp);
26740 this_cpu_write(current_task, next_p);
26741+ this_cpu_write(current_tinfo, &next_p->tinfo);
26742
26743 /*
26744 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26745@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26746 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26747 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26748
26749- this_cpu_write(kernel_stack,
26750- (unsigned long)task_stack_page(next_p) +
26751- THREAD_SIZE - KERNEL_STACK_OFFSET);
26752+ this_cpu_write(kernel_stack, next->sp0);
26753
26754 /*
26755 * Now maybe reload the debug registers and handle I/O bitmaps
26756@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26757 if (!p || p == current || p->state == TASK_RUNNING)
26758 return 0;
26759 stack = (unsigned long)task_stack_page(p);
26760- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26761+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26762 return 0;
26763 fp = *(u64 *)(p->thread.sp);
26764 do {
26765- if (fp < (unsigned long)stack ||
26766- fp >= (unsigned long)stack+THREAD_SIZE)
26767+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26768 return 0;
26769 ip = *(u64 *)(fp+8);
26770 if (!in_sched_functions(ip))
26771diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26772index e510618..5165ac0 100644
26773--- a/arch/x86/kernel/ptrace.c
26774+++ b/arch/x86/kernel/ptrace.c
26775@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26776 unsigned long sp = (unsigned long)&regs->sp;
26777 u32 *prev_esp;
26778
26779- if (context == (sp & ~(THREAD_SIZE - 1)))
26780+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26781 return sp;
26782
26783- prev_esp = (u32 *)(context);
26784+ prev_esp = *(u32 **)(context);
26785 if (prev_esp)
26786 return (unsigned long)prev_esp;
26787
26788@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26789 if (child->thread.gs != value)
26790 return do_arch_prctl(child, ARCH_SET_GS, value);
26791 return 0;
26792+
26793+ case offsetof(struct user_regs_struct,ip):
26794+ /*
26795+ * Protect against any attempt to set ip to an
26796+ * impossible address. There are dragons lurking if the
26797+ * address is noncanonical. (This explicitly allows
26798+ * setting ip to TASK_SIZE_MAX, because user code can do
26799+ * that all by itself by running off the end of its
26800+ * address space.
26801+ */
26802+ if (value > TASK_SIZE_MAX)
26803+ return -EIO;
26804+ break;
26805+
26806 #endif
26807 }
26808
26809@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26810 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26811 {
26812 int i;
26813- int dr7 = 0;
26814+ unsigned long dr7 = 0;
26815 struct arch_hw_breakpoint *info;
26816
26817 for (i = 0; i < HBP_NUM; i++) {
26818@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26819 unsigned long addr, unsigned long data)
26820 {
26821 int ret;
26822- unsigned long __user *datap = (unsigned long __user *)data;
26823+ unsigned long __user *datap = (__force unsigned long __user *)data;
26824
26825 switch (request) {
26826 /* read the word at location addr in the USER area. */
26827@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26828 if ((int) addr < 0)
26829 return -EIO;
26830 ret = do_get_thread_area(child, addr,
26831- (struct user_desc __user *)data);
26832+ (__force struct user_desc __user *) data);
26833 break;
26834
26835 case PTRACE_SET_THREAD_AREA:
26836 if ((int) addr < 0)
26837 return -EIO;
26838 ret = do_set_thread_area(child, addr,
26839- (struct user_desc __user *)data, 0);
26840+ (__force struct user_desc __user *) data, 0);
26841 break;
26842 #endif
26843
26844@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26845
26846 #ifdef CONFIG_X86_64
26847
26848-static struct user_regset x86_64_regsets[] __read_mostly = {
26849+static user_regset_no_const x86_64_regsets[] __read_only = {
26850 [REGSET_GENERAL] = {
26851 .core_note_type = NT_PRSTATUS,
26852 .n = sizeof(struct user_regs_struct) / sizeof(long),
26853@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26854 #endif /* CONFIG_X86_64 */
26855
26856 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26857-static struct user_regset x86_32_regsets[] __read_mostly = {
26858+static user_regset_no_const x86_32_regsets[] __read_only = {
26859 [REGSET_GENERAL] = {
26860 .core_note_type = NT_PRSTATUS,
26861 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26862@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26863 */
26864 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26865
26866-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26867+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26868 {
26869 #ifdef CONFIG_X86_64
26870 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26871@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26872 memset(info, 0, sizeof(*info));
26873 info->si_signo = SIGTRAP;
26874 info->si_code = si_code;
26875- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26876+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26877 }
26878
26879 void user_single_step_siginfo(struct task_struct *tsk,
26880@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26881 }
26882 }
26883
26884+#ifdef CONFIG_GRKERNSEC_SETXID
26885+extern void gr_delayed_cred_worker(void);
26886+#endif
26887+
26888 /*
26889 * We can return 0 to resume the syscall or anything else to go to phase
26890 * 2. If we resume the syscall, we need to put something appropriate in
26891@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26892
26893 BUG_ON(regs != task_pt_regs(current));
26894
26895+#ifdef CONFIG_GRKERNSEC_SETXID
26896+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26897+ gr_delayed_cred_worker();
26898+#endif
26899+
26900 /*
26901 * If we stepped into a sysenter/syscall insn, it trapped in
26902 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26903@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26904 */
26905 user_exit();
26906
26907+#ifdef CONFIG_GRKERNSEC_SETXID
26908+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26909+ gr_delayed_cred_worker();
26910+#endif
26911+
26912 audit_syscall_exit(regs);
26913
26914 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26915diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26916index e5ecd20..60f7eef 100644
26917--- a/arch/x86/kernel/pvclock.c
26918+++ b/arch/x86/kernel/pvclock.c
26919@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26920 reset_hung_task_detector();
26921 }
26922
26923-static atomic64_t last_value = ATOMIC64_INIT(0);
26924+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26925
26926 void pvclock_resume(void)
26927 {
26928- atomic64_set(&last_value, 0);
26929+ atomic64_set_unchecked(&last_value, 0);
26930 }
26931
26932 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26933@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26934 * updating at the same time, and one of them could be slightly behind,
26935 * making the assumption that last_value always go forward fail to hold.
26936 */
26937- last = atomic64_read(&last_value);
26938+ last = atomic64_read_unchecked(&last_value);
26939 do {
26940 if (ret < last)
26941 return last;
26942- last = atomic64_cmpxchg(&last_value, last, ret);
26943+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26944 } while (unlikely(last != ret));
26945
26946 return ret;
26947diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26948index 86db4bc..a50a54a 100644
26949--- a/arch/x86/kernel/reboot.c
26950+++ b/arch/x86/kernel/reboot.c
26951@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26952
26953 void __noreturn machine_real_restart(unsigned int type)
26954 {
26955+
26956+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26957+ struct desc_struct *gdt;
26958+#endif
26959+
26960 local_irq_disable();
26961
26962 /*
26963@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26964
26965 /* Jump to the identity-mapped low memory code */
26966 #ifdef CONFIG_X86_32
26967- asm volatile("jmpl *%0" : :
26968+
26969+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26970+ gdt = get_cpu_gdt_table(smp_processor_id());
26971+ pax_open_kernel();
26972+#ifdef CONFIG_PAX_MEMORY_UDEREF
26973+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26974+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26975+ loadsegment(ds, __KERNEL_DS);
26976+ loadsegment(es, __KERNEL_DS);
26977+ loadsegment(ss, __KERNEL_DS);
26978+#endif
26979+#ifdef CONFIG_PAX_KERNEXEC
26980+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26981+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26982+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26983+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26984+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26985+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26986+#endif
26987+ pax_close_kernel();
26988+#endif
26989+
26990+ asm volatile("ljmpl *%0" : :
26991 "rm" (real_mode_header->machine_real_restart_asm),
26992 "a" (type));
26993 #else
26994@@ -137,7 +164,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
26995 /*
26996 * This is a single dmi_table handling all reboot quirks.
26997 */
26998-static struct dmi_system_id __initdata reboot_dmi_table[] = {
26999+static const struct dmi_system_id __initconst reboot_dmi_table[] = {
27000
27001 /* Acer */
27002 { /* Handle reboot issue on Acer Aspire one */
27003@@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27004 * This means that this function can never return, it can misbehave
27005 * by not rebooting properly and hanging.
27006 */
27007-static void native_machine_emergency_restart(void)
27008+static void __noreturn native_machine_emergency_restart(void)
27009 {
27010 int i;
27011 int attempt = 0;
27012@@ -631,13 +658,13 @@ void native_machine_shutdown(void)
27013 #endif
27014 }
27015
27016-static void __machine_emergency_restart(int emergency)
27017+static void __noreturn __machine_emergency_restart(int emergency)
27018 {
27019 reboot_emergency = emergency;
27020 machine_ops.emergency_restart();
27021 }
27022
27023-static void native_machine_restart(char *__unused)
27024+static void __noreturn native_machine_restart(char *__unused)
27025 {
27026 pr_notice("machine restart\n");
27027
27028@@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27029 __machine_emergency_restart(0);
27030 }
27031
27032-static void native_machine_halt(void)
27033+static void __noreturn native_machine_halt(void)
27034 {
27035 /* Stop other cpus and apics */
27036 machine_shutdown();
27037@@ -656,7 +683,7 @@ static void native_machine_halt(void)
27038 stop_this_cpu(NULL);
27039 }
27040
27041-static void native_machine_power_off(void)
27042+static void __noreturn native_machine_power_off(void)
27043 {
27044 if (pm_power_off) {
27045 if (!reboot_force)
27046@@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27047 }
27048 /* A fallback in case there is no PM info available */
27049 tboot_shutdown(TB_SHUTDOWN_HALT);
27050+ unreachable();
27051 }
27052
27053-struct machine_ops machine_ops = {
27054+struct machine_ops machine_ops __read_only = {
27055 .power_off = native_machine_power_off,
27056 .shutdown = native_machine_shutdown,
27057 .emergency_restart = native_machine_emergency_restart,
27058diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27059index c8e41e9..64049ef 100644
27060--- a/arch/x86/kernel/reboot_fixups_32.c
27061+++ b/arch/x86/kernel/reboot_fixups_32.c
27062@@ -57,7 +57,7 @@ struct device_fixup {
27063 unsigned int vendor;
27064 unsigned int device;
27065 void (*reboot_fixup)(struct pci_dev *);
27066-};
27067+} __do_const;
27068
27069 /*
27070 * PCI ids solely used for fixups_table go here
27071diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27072index 3fd2c69..a444264 100644
27073--- a/arch/x86/kernel/relocate_kernel_64.S
27074+++ b/arch/x86/kernel/relocate_kernel_64.S
27075@@ -96,8 +96,7 @@ relocate_kernel:
27076
27077 /* jump to identity mapped page */
27078 addq $(identity_mapped - relocate_kernel), %r8
27079- pushq %r8
27080- ret
27081+ jmp *%r8
27082
27083 identity_mapped:
27084 /* set return address to 0 if not preserving context */
27085diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27086index 0a2421c..11f3f36 100644
27087--- a/arch/x86/kernel/setup.c
27088+++ b/arch/x86/kernel/setup.c
27089@@ -111,6 +111,7 @@
27090 #include <asm/mce.h>
27091 #include <asm/alternative.h>
27092 #include <asm/prom.h>
27093+#include <asm/boot.h>
27094
27095 /*
27096 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27097@@ -206,10 +207,12 @@ EXPORT_SYMBOL(boot_cpu_data);
27098 #endif
27099
27100
27101-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27102-__visible unsigned long mmu_cr4_features;
27103+#ifdef CONFIG_X86_64
27104+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27105+#elif defined(CONFIG_X86_PAE)
27106+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27107 #else
27108-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27109+__visible unsigned long mmu_cr4_features __read_only;
27110 #endif
27111
27112 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27113@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
27114 * area (640->1Mb) as ram even though it is not.
27115 * take them out.
27116 */
27117- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27118+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27119
27120 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27121 }
27122@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
27123 /* called before trim_bios_range() to spare extra sanitize */
27124 static void __init e820_add_kernel_range(void)
27125 {
27126- u64 start = __pa_symbol(_text);
27127+ u64 start = __pa_symbol(ktla_ktva(_text));
27128 u64 size = __pa_symbol(_end) - start;
27129
27130 /*
27131@@ -855,8 +858,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27132
27133 void __init setup_arch(char **cmdline_p)
27134 {
27135+#ifdef CONFIG_X86_32
27136+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27137+#else
27138 memblock_reserve(__pa_symbol(_text),
27139 (unsigned long)__bss_stop - (unsigned long)_text);
27140+#endif
27141
27142 early_reserve_initrd();
27143
27144@@ -954,16 +961,16 @@ void __init setup_arch(char **cmdline_p)
27145
27146 if (!boot_params.hdr.root_flags)
27147 root_mountflags &= ~MS_RDONLY;
27148- init_mm.start_code = (unsigned long) _text;
27149- init_mm.end_code = (unsigned long) _etext;
27150+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27151+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27152 init_mm.end_data = (unsigned long) _edata;
27153 init_mm.brk = _brk_end;
27154
27155 mpx_mm_init(&init_mm);
27156
27157- code_resource.start = __pa_symbol(_text);
27158- code_resource.end = __pa_symbol(_etext)-1;
27159- data_resource.start = __pa_symbol(_etext);
27160+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27161+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27162+ data_resource.start = __pa_symbol(_sdata);
27163 data_resource.end = __pa_symbol(_edata)-1;
27164 bss_resource.start = __pa_symbol(__bss_start);
27165 bss_resource.end = __pa_symbol(__bss_stop)-1;
27166diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27167index e4fcb87..9c06c55 100644
27168--- a/arch/x86/kernel/setup_percpu.c
27169+++ b/arch/x86/kernel/setup_percpu.c
27170@@ -21,19 +21,17 @@
27171 #include <asm/cpu.h>
27172 #include <asm/stackprotector.h>
27173
27174-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27175+#ifdef CONFIG_SMP
27176+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27177 EXPORT_PER_CPU_SYMBOL(cpu_number);
27178+#endif
27179
27180-#ifdef CONFIG_X86_64
27181 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27182-#else
27183-#define BOOT_PERCPU_OFFSET 0
27184-#endif
27185
27186 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27187 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27188
27189-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27190+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27191 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27192 };
27193 EXPORT_SYMBOL(__per_cpu_offset);
27194@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27195 {
27196 #ifdef CONFIG_NEED_MULTIPLE_NODES
27197 pg_data_t *last = NULL;
27198- unsigned int cpu;
27199+ int cpu;
27200
27201 for_each_possible_cpu(cpu) {
27202 int node = early_cpu_to_node(cpu);
27203@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27204 {
27205 #ifdef CONFIG_X86_32
27206 struct desc_struct gdt;
27207+ unsigned long base = per_cpu_offset(cpu);
27208
27209- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27210- 0x2 | DESCTYPE_S, 0x8);
27211- gdt.s = 1;
27212+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27213+ 0x83 | DESCTYPE_S, 0xC);
27214 write_gdt_entry(get_cpu_gdt_table(cpu),
27215 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27216 #endif
27217@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27218 /* alrighty, percpu areas up and running */
27219 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27220 for_each_possible_cpu(cpu) {
27221+#ifdef CONFIG_CC_STACKPROTECTOR
27222+#ifdef CONFIG_X86_32
27223+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27224+#endif
27225+#endif
27226 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27227 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27228 per_cpu(cpu_number, cpu) = cpu;
27229@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27230 */
27231 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27232 #endif
27233+#ifdef CONFIG_CC_STACKPROTECTOR
27234+#ifdef CONFIG_X86_32
27235+ if (!cpu)
27236+ per_cpu(stack_canary.canary, cpu) = canary;
27237+#endif
27238+#endif
27239 /*
27240 * Up to this point, the boot CPU has been using .init.data
27241 * area. Reload any changed state for the boot CPU.
27242diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27243index e504246..ba10432 100644
27244--- a/arch/x86/kernel/signal.c
27245+++ b/arch/x86/kernel/signal.c
27246@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27247 * Align the stack pointer according to the i386 ABI,
27248 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27249 */
27250- sp = ((sp + 4) & -16ul) - 4;
27251+ sp = ((sp - 12) & -16ul) - 4;
27252 #else /* !CONFIG_X86_32 */
27253 sp = round_down(sp, 16) - 8;
27254 #endif
27255@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27256 }
27257
27258 if (current->mm->context.vdso)
27259- restorer = current->mm->context.vdso +
27260- selected_vdso32->sym___kernel_sigreturn;
27261+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27262 else
27263- restorer = &frame->retcode;
27264+ restorer = (void __user *)&frame->retcode;
27265 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27266 restorer = ksig->ka.sa.sa_restorer;
27267
27268@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27269 * reasons and because gdb uses it as a signature to notice
27270 * signal handler stack frames.
27271 */
27272- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27273+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27274
27275 if (err)
27276 return -EFAULT;
27277@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27278 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27279
27280 /* Set up to return from userspace. */
27281- restorer = current->mm->context.vdso +
27282- selected_vdso32->sym___kernel_rt_sigreturn;
27283+ if (current->mm->context.vdso)
27284+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27285+ else
27286+ restorer = (void __user *)&frame->retcode;
27287 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27288 restorer = ksig->ka.sa.sa_restorer;
27289 put_user_ex(restorer, &frame->pretcode);
27290@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27291 * reasons and because gdb uses it as a signature to notice
27292 * signal handler stack frames.
27293 */
27294- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27295+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27296 } put_user_catch(err);
27297
27298 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27299@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27300 {
27301 int usig = signr_convert(ksig->sig);
27302 sigset_t *set = sigmask_to_save();
27303- compat_sigset_t *cset = (compat_sigset_t *) set;
27304+ sigset_t sigcopy;
27305+ compat_sigset_t *cset;
27306+
27307+ sigcopy = *set;
27308+
27309+ cset = (compat_sigset_t *) &sigcopy;
27310
27311 /* Set up the stack frame */
27312 if (is_ia32_frame()) {
27313@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27314 } else if (is_x32_frame()) {
27315 return x32_setup_rt_frame(ksig, cset, regs);
27316 } else {
27317- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27318+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27319 }
27320 }
27321
27322diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27323index be8e1bd..a3d93fa 100644
27324--- a/arch/x86/kernel/smp.c
27325+++ b/arch/x86/kernel/smp.c
27326@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27327
27328 __setup("nonmi_ipi", nonmi_ipi_setup);
27329
27330-struct smp_ops smp_ops = {
27331+struct smp_ops smp_ops __read_only = {
27332 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27333 .smp_prepare_cpus = native_smp_prepare_cpus,
27334 .smp_cpus_done = native_smp_cpus_done,
27335diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27336index febc6aa..37d8edf 100644
27337--- a/arch/x86/kernel/smpboot.c
27338+++ b/arch/x86/kernel/smpboot.c
27339@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused)
27340
27341 enable_start_cpu0 = 0;
27342
27343-#ifdef CONFIG_X86_32
27344+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27345+ barrier();
27346+
27347 /* switch away from the initial page table */
27348+#ifdef CONFIG_PAX_PER_CPU_PGD
27349+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27350+#else
27351 load_cr3(swapper_pg_dir);
27352+#endif
27353 __flush_tlb_all();
27354-#endif
27355
27356- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27357- barrier();
27358 /*
27359 * Check TSC synchronization with the BP:
27360 */
27361@@ -800,8 +803,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27362 alternatives_enable_smp();
27363
27364 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27365- (THREAD_SIZE + task_stack_page(idle))) - 1);
27366+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27367 per_cpu(current_task, cpu) = idle;
27368+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27369
27370 #ifdef CONFIG_X86_32
27371 /* Stack for startup_32 can be just as for start_secondary onwards */
27372@@ -810,10 +814,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27373 clear_tsk_thread_flag(idle, TIF_FORK);
27374 initial_gs = per_cpu_offset(cpu);
27375 #endif
27376- per_cpu(kernel_stack, cpu) =
27377- (unsigned long)task_stack_page(idle) -
27378- KERNEL_STACK_OFFSET + THREAD_SIZE;
27379+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27380+ pax_open_kernel();
27381 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27382+ pax_close_kernel();
27383 initial_code = (unsigned long)start_secondary;
27384 stack_start = idle->thread.sp;
27385
27386@@ -953,6 +957,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27387 /* the FPU context is blank, nobody can own it */
27388 __cpu_disable_lazy_restore(cpu);
27389
27390+#ifdef CONFIG_PAX_PER_CPU_PGD
27391+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27392+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27393+ KERNEL_PGD_PTRS);
27394+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27395+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27396+ KERNEL_PGD_PTRS);
27397+#endif
27398+
27399 err = do_boot_cpu(apicid, cpu, tidle);
27400 if (err) {
27401 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27402diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27403index 9b4d51d..5d28b58 100644
27404--- a/arch/x86/kernel/step.c
27405+++ b/arch/x86/kernel/step.c
27406@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27407 struct desc_struct *desc;
27408 unsigned long base;
27409
27410- seg &= ~7UL;
27411+ seg >>= 3;
27412
27413 mutex_lock(&child->mm->context.lock);
27414- if (unlikely((seg >> 3) >= child->mm->context.size))
27415+ if (unlikely(seg >= child->mm->context.size))
27416 addr = -1L; /* bogus selector, access would fault */
27417 else {
27418 desc = child->mm->context.ldt + seg;
27419@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27420 addr += base;
27421 }
27422 mutex_unlock(&child->mm->context.lock);
27423- }
27424+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27425+ addr = ktla_ktva(addr);
27426
27427 return addr;
27428 }
27429@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27430 unsigned char opcode[15];
27431 unsigned long addr = convert_ip_to_linear(child, regs);
27432
27433+ if (addr == -EINVAL)
27434+ return 0;
27435+
27436 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27437 for (i = 0; i < copied; i++) {
27438 switch (opcode[i]) {
27439diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27440new file mode 100644
27441index 0000000..5877189
27442--- /dev/null
27443+++ b/arch/x86/kernel/sys_i386_32.c
27444@@ -0,0 +1,189 @@
27445+/*
27446+ * This file contains various random system calls that
27447+ * have a non-standard calling sequence on the Linux/i386
27448+ * platform.
27449+ */
27450+
27451+#include <linux/errno.h>
27452+#include <linux/sched.h>
27453+#include <linux/mm.h>
27454+#include <linux/fs.h>
27455+#include <linux/smp.h>
27456+#include <linux/sem.h>
27457+#include <linux/msg.h>
27458+#include <linux/shm.h>
27459+#include <linux/stat.h>
27460+#include <linux/syscalls.h>
27461+#include <linux/mman.h>
27462+#include <linux/file.h>
27463+#include <linux/utsname.h>
27464+#include <linux/ipc.h>
27465+#include <linux/elf.h>
27466+
27467+#include <linux/uaccess.h>
27468+#include <linux/unistd.h>
27469+
27470+#include <asm/syscalls.h>
27471+
27472+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27473+{
27474+ unsigned long pax_task_size = TASK_SIZE;
27475+
27476+#ifdef CONFIG_PAX_SEGMEXEC
27477+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27478+ pax_task_size = SEGMEXEC_TASK_SIZE;
27479+#endif
27480+
27481+ if (flags & MAP_FIXED)
27482+ if (len > pax_task_size || addr > pax_task_size - len)
27483+ return -EINVAL;
27484+
27485+ return 0;
27486+}
27487+
27488+/*
27489+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27490+ */
27491+static unsigned long get_align_mask(void)
27492+{
27493+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27494+ return 0;
27495+
27496+ if (!(current->flags & PF_RANDOMIZE))
27497+ return 0;
27498+
27499+ return va_align.mask;
27500+}
27501+
27502+unsigned long
27503+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27504+ unsigned long len, unsigned long pgoff, unsigned long flags)
27505+{
27506+ struct mm_struct *mm = current->mm;
27507+ struct vm_area_struct *vma;
27508+ unsigned long pax_task_size = TASK_SIZE;
27509+ struct vm_unmapped_area_info info;
27510+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27511+
27512+#ifdef CONFIG_PAX_SEGMEXEC
27513+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27514+ pax_task_size = SEGMEXEC_TASK_SIZE;
27515+#endif
27516+
27517+ pax_task_size -= PAGE_SIZE;
27518+
27519+ if (len > pax_task_size)
27520+ return -ENOMEM;
27521+
27522+ if (flags & MAP_FIXED)
27523+ return addr;
27524+
27525+#ifdef CONFIG_PAX_RANDMMAP
27526+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27527+#endif
27528+
27529+ if (addr) {
27530+ addr = PAGE_ALIGN(addr);
27531+ if (pax_task_size - len >= addr) {
27532+ vma = find_vma(mm, addr);
27533+ if (check_heap_stack_gap(vma, addr, len, offset))
27534+ return addr;
27535+ }
27536+ }
27537+
27538+ info.flags = 0;
27539+ info.length = len;
27540+ info.align_mask = filp ? get_align_mask() : 0;
27541+ info.align_offset = pgoff << PAGE_SHIFT;
27542+ info.threadstack_offset = offset;
27543+
27544+#ifdef CONFIG_PAX_PAGEEXEC
27545+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27546+ info.low_limit = 0x00110000UL;
27547+ info.high_limit = mm->start_code;
27548+
27549+#ifdef CONFIG_PAX_RANDMMAP
27550+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27551+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27552+#endif
27553+
27554+ if (info.low_limit < info.high_limit) {
27555+ addr = vm_unmapped_area(&info);
27556+ if (!IS_ERR_VALUE(addr))
27557+ return addr;
27558+ }
27559+ } else
27560+#endif
27561+
27562+ info.low_limit = mm->mmap_base;
27563+ info.high_limit = pax_task_size;
27564+
27565+ return vm_unmapped_area(&info);
27566+}
27567+
27568+unsigned long
27569+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27570+ const unsigned long len, const unsigned long pgoff,
27571+ const unsigned long flags)
27572+{
27573+ struct vm_area_struct *vma;
27574+ struct mm_struct *mm = current->mm;
27575+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27576+ struct vm_unmapped_area_info info;
27577+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27578+
27579+#ifdef CONFIG_PAX_SEGMEXEC
27580+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27581+ pax_task_size = SEGMEXEC_TASK_SIZE;
27582+#endif
27583+
27584+ pax_task_size -= PAGE_SIZE;
27585+
27586+ /* requested length too big for entire address space */
27587+ if (len > pax_task_size)
27588+ return -ENOMEM;
27589+
27590+ if (flags & MAP_FIXED)
27591+ return addr;
27592+
27593+#ifdef CONFIG_PAX_PAGEEXEC
27594+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27595+ goto bottomup;
27596+#endif
27597+
27598+#ifdef CONFIG_PAX_RANDMMAP
27599+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27600+#endif
27601+
27602+ /* requesting a specific address */
27603+ if (addr) {
27604+ addr = PAGE_ALIGN(addr);
27605+ if (pax_task_size - len >= addr) {
27606+ vma = find_vma(mm, addr);
27607+ if (check_heap_stack_gap(vma, addr, len, offset))
27608+ return addr;
27609+ }
27610+ }
27611+
27612+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27613+ info.length = len;
27614+ info.low_limit = PAGE_SIZE;
27615+ info.high_limit = mm->mmap_base;
27616+ info.align_mask = filp ? get_align_mask() : 0;
27617+ info.align_offset = pgoff << PAGE_SHIFT;
27618+ info.threadstack_offset = offset;
27619+
27620+ addr = vm_unmapped_area(&info);
27621+ if (!(addr & ~PAGE_MASK))
27622+ return addr;
27623+ VM_BUG_ON(addr != -ENOMEM);
27624+
27625+bottomup:
27626+ /*
27627+ * A failed mmap() very likely causes application failure,
27628+ * so fall back to the bottom-up function here. This scenario
27629+ * can happen with large stack limits and large mmap()
27630+ * allocations.
27631+ */
27632+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27633+}
27634diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27635index 30277e2..5664a29 100644
27636--- a/arch/x86/kernel/sys_x86_64.c
27637+++ b/arch/x86/kernel/sys_x86_64.c
27638@@ -81,8 +81,8 @@ out:
27639 return error;
27640 }
27641
27642-static void find_start_end(unsigned long flags, unsigned long *begin,
27643- unsigned long *end)
27644+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27645+ unsigned long *begin, unsigned long *end)
27646 {
27647 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27648 unsigned long new_begin;
27649@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27650 *begin = new_begin;
27651 }
27652 } else {
27653- *begin = current->mm->mmap_legacy_base;
27654+ *begin = mm->mmap_legacy_base;
27655 *end = TASK_SIZE;
27656 }
27657 }
27658@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27659 struct vm_area_struct *vma;
27660 struct vm_unmapped_area_info info;
27661 unsigned long begin, end;
27662+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27663
27664 if (flags & MAP_FIXED)
27665 return addr;
27666
27667- find_start_end(flags, &begin, &end);
27668+ find_start_end(mm, flags, &begin, &end);
27669
27670 if (len > end)
27671 return -ENOMEM;
27672
27673+#ifdef CONFIG_PAX_RANDMMAP
27674+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27675+#endif
27676+
27677 if (addr) {
27678 addr = PAGE_ALIGN(addr);
27679 vma = find_vma(mm, addr);
27680- if (end - len >= addr &&
27681- (!vma || addr + len <= vma->vm_start))
27682+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27683 return addr;
27684 }
27685
27686@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27687 info.high_limit = end;
27688 info.align_mask = filp ? get_align_mask() : 0;
27689 info.align_offset = pgoff << PAGE_SHIFT;
27690+ info.threadstack_offset = offset;
27691 return vm_unmapped_area(&info);
27692 }
27693
27694@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27695 struct mm_struct *mm = current->mm;
27696 unsigned long addr = addr0;
27697 struct vm_unmapped_area_info info;
27698+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27699
27700 /* requested length too big for entire address space */
27701 if (len > TASK_SIZE)
27702@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27703 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27704 goto bottomup;
27705
27706+#ifdef CONFIG_PAX_RANDMMAP
27707+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27708+#endif
27709+
27710 /* requesting a specific address */
27711 if (addr) {
27712 addr = PAGE_ALIGN(addr);
27713 vma = find_vma(mm, addr);
27714- if (TASK_SIZE - len >= addr &&
27715- (!vma || addr + len <= vma->vm_start))
27716+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27717 return addr;
27718 }
27719
27720@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27721 info.high_limit = mm->mmap_base;
27722 info.align_mask = filp ? get_align_mask() : 0;
27723 info.align_offset = pgoff << PAGE_SHIFT;
27724+ info.threadstack_offset = offset;
27725 addr = vm_unmapped_area(&info);
27726 if (!(addr & ~PAGE_MASK))
27727 return addr;
27728diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27729index 91a4496..42fc304 100644
27730--- a/arch/x86/kernel/tboot.c
27731+++ b/arch/x86/kernel/tboot.c
27732@@ -44,6 +44,7 @@
27733 #include <asm/setup.h>
27734 #include <asm/e820.h>
27735 #include <asm/io.h>
27736+#include <asm/tlbflush.h>
27737
27738 #include "../realmode/rm/wakeup.h"
27739
27740@@ -221,7 +222,7 @@ static int tboot_setup_sleep(void)
27741
27742 void tboot_shutdown(u32 shutdown_type)
27743 {
27744- void (*shutdown)(void);
27745+ void (* __noreturn shutdown)(void);
27746
27747 if (!tboot_enabled())
27748 return;
27749@@ -242,8 +243,9 @@ void tboot_shutdown(u32 shutdown_type)
27750 tboot->shutdown_type = shutdown_type;
27751
27752 switch_to_tboot_pt();
27753+ cr4_clear_bits(X86_CR4_PCIDE);
27754
27755- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27756+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27757 shutdown();
27758
27759 /* should not reach here */
27760@@ -310,7 +312,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27761 return -ENODEV;
27762 }
27763
27764-static atomic_t ap_wfs_count;
27765+static atomic_unchecked_t ap_wfs_count;
27766
27767 static int tboot_wait_for_aps(int num_aps)
27768 {
27769@@ -334,9 +336,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27770 {
27771 switch (action) {
27772 case CPU_DYING:
27773- atomic_inc(&ap_wfs_count);
27774+ atomic_inc_unchecked(&ap_wfs_count);
27775 if (num_online_cpus() == 1)
27776- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27777+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27778 return NOTIFY_BAD;
27779 break;
27780 }
27781@@ -422,7 +424,7 @@ static __init int tboot_late_init(void)
27782
27783 tboot_create_trampoline();
27784
27785- atomic_set(&ap_wfs_count, 0);
27786+ atomic_set_unchecked(&ap_wfs_count, 0);
27787 register_hotcpu_notifier(&tboot_cpu_notifier);
27788
27789 #ifdef CONFIG_DEBUG_FS
27790diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27791index 25adc0e..1df4349 100644
27792--- a/arch/x86/kernel/time.c
27793+++ b/arch/x86/kernel/time.c
27794@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27795 {
27796 unsigned long pc = instruction_pointer(regs);
27797
27798- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27799+ if (!user_mode(regs) && in_lock_functions(pc)) {
27800 #ifdef CONFIG_FRAME_POINTER
27801- return *(unsigned long *)(regs->bp + sizeof(long));
27802+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27803 #else
27804 unsigned long *sp =
27805 (unsigned long *)kernel_stack_pointer(regs);
27806@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27807 * or above a saved flags. Eflags has bits 22-31 zero,
27808 * kernel addresses don't.
27809 */
27810+
27811+#ifdef CONFIG_PAX_KERNEXEC
27812+ return ktla_ktva(sp[0]);
27813+#else
27814 if (sp[0] >> 22)
27815 return sp[0];
27816 if (sp[1] >> 22)
27817 return sp[1];
27818 #endif
27819+
27820+#endif
27821 }
27822 return pc;
27823 }
27824diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27825index 7fc5e84..c6e445a 100644
27826--- a/arch/x86/kernel/tls.c
27827+++ b/arch/x86/kernel/tls.c
27828@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27829 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27830 return -EINVAL;
27831
27832+#ifdef CONFIG_PAX_SEGMEXEC
27833+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27834+ return -EINVAL;
27835+#endif
27836+
27837 set_tls_desc(p, idx, &info, 1);
27838
27839 return 0;
27840@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27841
27842 if (kbuf)
27843 info = kbuf;
27844- else if (__copy_from_user(infobuf, ubuf, count))
27845+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27846 return -EFAULT;
27847 else
27848 info = infobuf;
27849diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27850index 1c113db..287b42e 100644
27851--- a/arch/x86/kernel/tracepoint.c
27852+++ b/arch/x86/kernel/tracepoint.c
27853@@ -9,11 +9,11 @@
27854 #include <linux/atomic.h>
27855
27856 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27857-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27858+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27859 (unsigned long) trace_idt_table };
27860
27861 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27862-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27863+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27864
27865 static int trace_irq_vector_refcount;
27866 static DEFINE_MUTEX(irq_vector_mutex);
27867diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27868index 4ff5d16..736e3e1 100644
27869--- a/arch/x86/kernel/traps.c
27870+++ b/arch/x86/kernel/traps.c
27871@@ -68,7 +68,7 @@
27872 #include <asm/proto.h>
27873
27874 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27875-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27876+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27877 #else
27878 #include <asm/processor-flags.h>
27879 #include <asm/setup.h>
27880@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27881 #endif
27882
27883 /* Must be page-aligned because the real IDT is used in a fixmap. */
27884-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27885+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27886
27887 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27888 EXPORT_SYMBOL_GPL(used_vectors);
27889@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
27890 {
27891 enum ctx_state prev_state;
27892
27893- if (user_mode_vm(regs)) {
27894+ if (user_mode(regs)) {
27895 /* Other than that, we're just an exception. */
27896 prev_state = exception_enter();
27897 } else {
27898@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27899 /* Must be before exception_exit. */
27900 preempt_count_sub(HARDIRQ_OFFSET);
27901
27902- if (user_mode_vm(regs))
27903+ if (user_mode(regs))
27904 return exception_exit(prev_state);
27905 else
27906 rcu_nmi_exit();
27907@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27908 *
27909 * IST exception handlers normally cannot schedule. As a special
27910 * exception, if the exception interrupted userspace code (i.e.
27911- * user_mode_vm(regs) would return true) and the exception was not
27912+ * user_mode(regs) would return true) and the exception was not
27913 * a double fault, it can be safe to schedule. ist_begin_non_atomic()
27914 * begins a non-atomic section within an ist_enter()/ist_exit() region.
27915 * Callers are responsible for enabling interrupts themselves inside
27916@@ -167,7 +167,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27917 */
27918 void ist_begin_non_atomic(struct pt_regs *regs)
27919 {
27920- BUG_ON(!user_mode_vm(regs));
27921+ BUG_ON(!user_mode(regs));
27922
27923 /*
27924 * Sanity check: we need to be on the normal thread stack. This
27925@@ -191,11 +191,11 @@ void ist_end_non_atomic(void)
27926 }
27927
27928 static nokprobe_inline int
27929-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27930+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27931 struct pt_regs *regs, long error_code)
27932 {
27933 #ifdef CONFIG_X86_32
27934- if (regs->flags & X86_VM_MASK) {
27935+ if (v8086_mode(regs)) {
27936 /*
27937 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27938 * On nmi (interrupt 2), do_trap should not be called.
27939@@ -208,12 +208,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27940 return -1;
27941 }
27942 #endif
27943- if (!user_mode(regs)) {
27944+ if (!user_mode_novm(regs)) {
27945 if (!fixup_exception(regs)) {
27946 tsk->thread.error_code = error_code;
27947 tsk->thread.trap_nr = trapnr;
27948+
27949+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27950+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27951+ str = "PAX: suspicious stack segment fault";
27952+#endif
27953+
27954 die(str, regs, error_code);
27955 }
27956+
27957+#ifdef CONFIG_PAX_REFCOUNT
27958+ if (trapnr == X86_TRAP_OF)
27959+ pax_report_refcount_overflow(regs);
27960+#endif
27961+
27962 return 0;
27963 }
27964
27965@@ -252,7 +264,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27966 }
27967
27968 static void
27969-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27970+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27971 long error_code, siginfo_t *info)
27972 {
27973 struct task_struct *tsk = current;
27974@@ -276,7 +288,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27975 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27976 printk_ratelimit()) {
27977 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27978- tsk->comm, tsk->pid, str,
27979+ tsk->comm, task_pid_nr(tsk), str,
27980 regs->ip, regs->sp, error_code);
27981 print_vma_addr(" in ", regs->ip);
27982 pr_cont("\n");
27983@@ -358,6 +370,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27984 tsk->thread.error_code = error_code;
27985 tsk->thread.trap_nr = X86_TRAP_DF;
27986
27987+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27988+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27989+ die("grsec: kernel stack overflow detected", regs, error_code);
27990+#endif
27991+
27992 #ifdef CONFIG_DOUBLEFAULT
27993 df_debug(regs, error_code);
27994 #endif
27995@@ -384,7 +401,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
27996 goto exit;
27997 conditional_sti(regs);
27998
27999- if (!user_mode_vm(regs))
28000+ if (!user_mode(regs))
28001 die("bounds", regs, error_code);
28002
28003 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
28004@@ -463,7 +480,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28005 conditional_sti(regs);
28006
28007 #ifdef CONFIG_X86_32
28008- if (regs->flags & X86_VM_MASK) {
28009+ if (v8086_mode(regs)) {
28010 local_irq_enable();
28011 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28012 goto exit;
28013@@ -471,18 +488,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28014 #endif
28015
28016 tsk = current;
28017- if (!user_mode(regs)) {
28018+ if (!user_mode_novm(regs)) {
28019 if (fixup_exception(regs))
28020 goto exit;
28021
28022 tsk->thread.error_code = error_code;
28023 tsk->thread.trap_nr = X86_TRAP_GP;
28024 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28025- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28026+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28027+
28028+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28029+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28030+ die("PAX: suspicious general protection fault", regs, error_code);
28031+ else
28032+#endif
28033+
28034 die("general protection fault", regs, error_code);
28035+ }
28036 goto exit;
28037 }
28038
28039+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28040+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28041+ struct mm_struct *mm = tsk->mm;
28042+ unsigned long limit;
28043+
28044+ down_write(&mm->mmap_sem);
28045+ limit = mm->context.user_cs_limit;
28046+ if (limit < TASK_SIZE) {
28047+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28048+ up_write(&mm->mmap_sem);
28049+ return;
28050+ }
28051+ up_write(&mm->mmap_sem);
28052+ }
28053+#endif
28054+
28055 tsk->thread.error_code = error_code;
28056 tsk->thread.trap_nr = X86_TRAP_GP;
28057
28058@@ -581,13 +622,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28059 container_of(task_pt_regs(current),
28060 struct bad_iret_stack, regs);
28061
28062+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28063+ new_stack = s;
28064+
28065 /* Copy the IRET target to the new stack. */
28066 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28067
28068 /* Copy the remainder of the stack from the current stack. */
28069 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28070
28071- BUG_ON(!user_mode_vm(&new_stack->regs));
28072+ BUG_ON(!user_mode(&new_stack->regs));
28073 return new_stack;
28074 }
28075 NOKPROBE_SYMBOL(fixup_bad_iret);
28076@@ -637,7 +681,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28077 * then it's very likely the result of an icebp/int01 trap.
28078 * User wants a sigtrap for that.
28079 */
28080- if (!dr6 && user_mode_vm(regs))
28081+ if (!dr6 && user_mode(regs))
28082 user_icebp = 1;
28083
28084 /* Catch kmemcheck conditions first of all! */
28085@@ -673,7 +717,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28086 /* It's safe to allow irq's after DR6 has been saved */
28087 preempt_conditional_sti(regs);
28088
28089- if (regs->flags & X86_VM_MASK) {
28090+ if (v8086_mode(regs)) {
28091 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28092 X86_TRAP_DB);
28093 preempt_conditional_cli(regs);
28094@@ -688,7 +732,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28095 * We already checked v86 mode above, so we can check for kernel mode
28096 * by just checking the CPL of CS.
28097 */
28098- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28099+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28100 tsk->thread.debugreg6 &= ~DR_STEP;
28101 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28102 regs->flags &= ~X86_EFLAGS_TF;
28103@@ -721,7 +765,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28104 return;
28105 conditional_sti(regs);
28106
28107- if (!user_mode_vm(regs))
28108+ if (!user_mode(regs))
28109 {
28110 if (!fixup_exception(regs)) {
28111 task->thread.error_code = error_code;
28112diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28113index 5054497..139f8f8 100644
28114--- a/arch/x86/kernel/tsc.c
28115+++ b/arch/x86/kernel/tsc.c
28116@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28117 */
28118 smp_wmb();
28119
28120- ACCESS_ONCE(c2n->head) = data;
28121+ ACCESS_ONCE_RW(c2n->head) = data;
28122 }
28123
28124 /*
28125diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28126index 81f8adb0..fff670e 100644
28127--- a/arch/x86/kernel/uprobes.c
28128+++ b/arch/x86/kernel/uprobes.c
28129@@ -912,7 +912,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28130 int ret = NOTIFY_DONE;
28131
28132 /* We are only interested in userspace traps */
28133- if (regs && !user_mode_vm(regs))
28134+ if (regs && !user_mode(regs))
28135 return NOTIFY_DONE;
28136
28137 switch (val) {
28138@@ -986,7 +986,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28139
28140 if (nleft != rasize) {
28141 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28142- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28143+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28144
28145 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28146 }
28147diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28148index b9242ba..50c5edd 100644
28149--- a/arch/x86/kernel/verify_cpu.S
28150+++ b/arch/x86/kernel/verify_cpu.S
28151@@ -20,6 +20,7 @@
28152 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28153 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28154 * arch/x86/kernel/head_32.S: processor startup
28155+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28156 *
28157 * verify_cpu, returns the status of longmode and SSE in register %eax.
28158 * 0: Success 1: Failure
28159diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28160index e8edcf5..27f9344 100644
28161--- a/arch/x86/kernel/vm86_32.c
28162+++ b/arch/x86/kernel/vm86_32.c
28163@@ -44,6 +44,7 @@
28164 #include <linux/ptrace.h>
28165 #include <linux/audit.h>
28166 #include <linux/stddef.h>
28167+#include <linux/grsecurity.h>
28168
28169 #include <asm/uaccess.h>
28170 #include <asm/io.h>
28171@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28172 do_exit(SIGSEGV);
28173 }
28174
28175- tss = &per_cpu(init_tss, get_cpu());
28176+ tss = init_tss + get_cpu();
28177 current->thread.sp0 = current->thread.saved_sp0;
28178 current->thread.sysenter_cs = __KERNEL_CS;
28179 load_sp0(tss, &current->thread);
28180@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28181
28182 if (tsk->thread.saved_sp0)
28183 return -EPERM;
28184+
28185+#ifdef CONFIG_GRKERNSEC_VM86
28186+ if (!capable(CAP_SYS_RAWIO)) {
28187+ gr_handle_vm86();
28188+ return -EPERM;
28189+ }
28190+#endif
28191+
28192 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28193 offsetof(struct kernel_vm86_struct, vm86plus) -
28194 sizeof(info.regs));
28195@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28196 int tmp;
28197 struct vm86plus_struct __user *v86;
28198
28199+#ifdef CONFIG_GRKERNSEC_VM86
28200+ if (!capable(CAP_SYS_RAWIO)) {
28201+ gr_handle_vm86();
28202+ return -EPERM;
28203+ }
28204+#endif
28205+
28206 tsk = current;
28207 switch (cmd) {
28208 case VM86_REQUEST_IRQ:
28209@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28210 tsk->thread.saved_fs = info->regs32->fs;
28211 tsk->thread.saved_gs = get_user_gs(info->regs32);
28212
28213- tss = &per_cpu(init_tss, get_cpu());
28214+ tss = init_tss + get_cpu();
28215 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28216 if (cpu_has_sep)
28217 tsk->thread.sysenter_cs = 0;
28218@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28219 goto cannot_handle;
28220 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28221 goto cannot_handle;
28222- intr_ptr = (unsigned long __user *) (i << 2);
28223+ intr_ptr = (__force unsigned long __user *) (i << 2);
28224 if (get_user(segoffs, intr_ptr))
28225 goto cannot_handle;
28226 if ((segoffs >> 16) == BIOSSEG)
28227diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28228index 00bf300..129df8e 100644
28229--- a/arch/x86/kernel/vmlinux.lds.S
28230+++ b/arch/x86/kernel/vmlinux.lds.S
28231@@ -26,6 +26,13 @@
28232 #include <asm/page_types.h>
28233 #include <asm/cache.h>
28234 #include <asm/boot.h>
28235+#include <asm/segment.h>
28236+
28237+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28238+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28239+#else
28240+#define __KERNEL_TEXT_OFFSET 0
28241+#endif
28242
28243 #undef i386 /* in case the preprocessor is a 32bit one */
28244
28245@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28246
28247 PHDRS {
28248 text PT_LOAD FLAGS(5); /* R_E */
28249+#ifdef CONFIG_X86_32
28250+ module PT_LOAD FLAGS(5); /* R_E */
28251+#endif
28252+#ifdef CONFIG_XEN
28253+ rodata PT_LOAD FLAGS(5); /* R_E */
28254+#else
28255+ rodata PT_LOAD FLAGS(4); /* R__ */
28256+#endif
28257 data PT_LOAD FLAGS(6); /* RW_ */
28258-#ifdef CONFIG_X86_64
28259+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28260 #ifdef CONFIG_SMP
28261 percpu PT_LOAD FLAGS(6); /* RW_ */
28262 #endif
28263+ text.init PT_LOAD FLAGS(5); /* R_E */
28264+ text.exit PT_LOAD FLAGS(5); /* R_E */
28265 init PT_LOAD FLAGS(7); /* RWE */
28266-#endif
28267 note PT_NOTE FLAGS(0); /* ___ */
28268 }
28269
28270 SECTIONS
28271 {
28272 #ifdef CONFIG_X86_32
28273- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28274- phys_startup_32 = startup_32 - LOAD_OFFSET;
28275+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28276 #else
28277- . = __START_KERNEL;
28278- phys_startup_64 = startup_64 - LOAD_OFFSET;
28279+ . = __START_KERNEL;
28280 #endif
28281
28282 /* Text and read-only data */
28283- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28284- _text = .;
28285+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28286 /* bootstrapping code */
28287+#ifdef CONFIG_X86_32
28288+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28289+#else
28290+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28291+#endif
28292+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28293+ _text = .;
28294 HEAD_TEXT
28295 . = ALIGN(8);
28296 _stext = .;
28297@@ -104,13 +124,47 @@ SECTIONS
28298 IRQENTRY_TEXT
28299 *(.fixup)
28300 *(.gnu.warning)
28301- /* End of text section */
28302- _etext = .;
28303 } :text = 0x9090
28304
28305- NOTES :text :note
28306+ . += __KERNEL_TEXT_OFFSET;
28307
28308- EXCEPTION_TABLE(16) :text = 0x9090
28309+#ifdef CONFIG_X86_32
28310+ . = ALIGN(PAGE_SIZE);
28311+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28312+
28313+#ifdef CONFIG_PAX_KERNEXEC
28314+ MODULES_EXEC_VADDR = .;
28315+ BYTE(0)
28316+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28317+ . = ALIGN(HPAGE_SIZE) - 1;
28318+ MODULES_EXEC_END = .;
28319+#endif
28320+
28321+ } :module
28322+#endif
28323+
28324+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28325+ /* End of text section */
28326+ BYTE(0)
28327+ _etext = . - __KERNEL_TEXT_OFFSET;
28328+ }
28329+
28330+#ifdef CONFIG_X86_32
28331+ . = ALIGN(PAGE_SIZE);
28332+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28333+ . = ALIGN(PAGE_SIZE);
28334+ *(.empty_zero_page)
28335+ *(.initial_pg_fixmap)
28336+ *(.initial_pg_pmd)
28337+ *(.initial_page_table)
28338+ *(.swapper_pg_dir)
28339+ } :rodata
28340+#endif
28341+
28342+ . = ALIGN(PAGE_SIZE);
28343+ NOTES :rodata :note
28344+
28345+ EXCEPTION_TABLE(16) :rodata
28346
28347 #if defined(CONFIG_DEBUG_RODATA)
28348 /* .text should occupy whole number of pages */
28349@@ -122,16 +176,20 @@ SECTIONS
28350
28351 /* Data */
28352 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28353+
28354+#ifdef CONFIG_PAX_KERNEXEC
28355+ . = ALIGN(HPAGE_SIZE);
28356+#else
28357+ . = ALIGN(PAGE_SIZE);
28358+#endif
28359+
28360 /* Start of data section */
28361 _sdata = .;
28362
28363 /* init_task */
28364 INIT_TASK_DATA(THREAD_SIZE)
28365
28366-#ifdef CONFIG_X86_32
28367- /* 32 bit has nosave before _edata */
28368 NOSAVE_DATA
28369-#endif
28370
28371 PAGE_ALIGNED_DATA(PAGE_SIZE)
28372
28373@@ -174,12 +232,19 @@ SECTIONS
28374 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28375
28376 /* Init code and data - will be freed after init */
28377- . = ALIGN(PAGE_SIZE);
28378 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28379+ BYTE(0)
28380+
28381+#ifdef CONFIG_PAX_KERNEXEC
28382+ . = ALIGN(HPAGE_SIZE);
28383+#else
28384+ . = ALIGN(PAGE_SIZE);
28385+#endif
28386+
28387 __init_begin = .; /* paired with __init_end */
28388- }
28389+ } :init.begin
28390
28391-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28392+#ifdef CONFIG_SMP
28393 /*
28394 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28395 * output PHDR, so the next output section - .init.text - should
28396@@ -190,12 +255,27 @@ SECTIONS
28397 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28398 #endif
28399
28400- INIT_TEXT_SECTION(PAGE_SIZE)
28401-#ifdef CONFIG_X86_64
28402- :init
28403-#endif
28404+ . = ALIGN(PAGE_SIZE);
28405+ init_begin = .;
28406+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28407+ VMLINUX_SYMBOL(_sinittext) = .;
28408+ INIT_TEXT
28409+ . = ALIGN(PAGE_SIZE);
28410+ } :text.init
28411
28412- INIT_DATA_SECTION(16)
28413+ /*
28414+ * .exit.text is discard at runtime, not link time, to deal with
28415+ * references from .altinstructions and .eh_frame
28416+ */
28417+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28418+ EXIT_TEXT
28419+ VMLINUX_SYMBOL(_einittext) = .;
28420+ . = ALIGN(16);
28421+ } :text.exit
28422+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28423+
28424+ . = ALIGN(PAGE_SIZE);
28425+ INIT_DATA_SECTION(16) :init
28426
28427 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28428 __x86_cpu_dev_start = .;
28429@@ -266,19 +346,12 @@ SECTIONS
28430 }
28431
28432 . = ALIGN(8);
28433- /*
28434- * .exit.text is discard at runtime, not link time, to deal with
28435- * references from .altinstructions and .eh_frame
28436- */
28437- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28438- EXIT_TEXT
28439- }
28440
28441 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28442 EXIT_DATA
28443 }
28444
28445-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28446+#ifndef CONFIG_SMP
28447 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28448 #endif
28449
28450@@ -297,16 +370,10 @@ SECTIONS
28451 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28452 __smp_locks = .;
28453 *(.smp_locks)
28454- . = ALIGN(PAGE_SIZE);
28455 __smp_locks_end = .;
28456+ . = ALIGN(PAGE_SIZE);
28457 }
28458
28459-#ifdef CONFIG_X86_64
28460- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28461- NOSAVE_DATA
28462- }
28463-#endif
28464-
28465 /* BSS */
28466 . = ALIGN(PAGE_SIZE);
28467 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28468@@ -322,6 +389,7 @@ SECTIONS
28469 __brk_base = .;
28470 . += 64 * 1024; /* 64k alignment slop space */
28471 *(.brk_reservation) /* areas brk users have reserved */
28472+ . = ALIGN(HPAGE_SIZE);
28473 __brk_limit = .;
28474 }
28475
28476@@ -348,13 +416,12 @@ SECTIONS
28477 * for the boot processor.
28478 */
28479 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28480-INIT_PER_CPU(gdt_page);
28481 INIT_PER_CPU(irq_stack_union);
28482
28483 /*
28484 * Build-time check on the image size:
28485 */
28486-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28487+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28488 "kernel image bigger than KERNEL_IMAGE_SIZE");
28489
28490 #ifdef CONFIG_SMP
28491diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28492index 2dcc6ff..082dc7a 100644
28493--- a/arch/x86/kernel/vsyscall_64.c
28494+++ b/arch/x86/kernel/vsyscall_64.c
28495@@ -38,15 +38,13 @@
28496 #define CREATE_TRACE_POINTS
28497 #include "vsyscall_trace.h"
28498
28499-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28500+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28501
28502 static int __init vsyscall_setup(char *str)
28503 {
28504 if (str) {
28505 if (!strcmp("emulate", str))
28506 vsyscall_mode = EMULATE;
28507- else if (!strcmp("native", str))
28508- vsyscall_mode = NATIVE;
28509 else if (!strcmp("none", str))
28510 vsyscall_mode = NONE;
28511 else
28512@@ -264,8 +262,7 @@ do_ret:
28513 return true;
28514
28515 sigsegv:
28516- force_sig(SIGSEGV, current);
28517- return true;
28518+ do_group_exit(SIGKILL);
28519 }
28520
28521 /*
28522@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28523 static struct vm_area_struct gate_vma = {
28524 .vm_start = VSYSCALL_ADDR,
28525 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28526- .vm_page_prot = PAGE_READONLY_EXEC,
28527- .vm_flags = VM_READ | VM_EXEC,
28528+ .vm_page_prot = PAGE_READONLY,
28529+ .vm_flags = VM_READ,
28530 .vm_ops = &gate_vma_ops,
28531 };
28532
28533@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28534 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28535
28536 if (vsyscall_mode != NONE)
28537- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28538- vsyscall_mode == NATIVE
28539- ? PAGE_KERNEL_VSYSCALL
28540- : PAGE_KERNEL_VVAR);
28541+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28542
28543 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28544 (unsigned long)VSYSCALL_ADDR);
28545diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28546index 37d8fa4..66e319a 100644
28547--- a/arch/x86/kernel/x8664_ksyms_64.c
28548+++ b/arch/x86/kernel/x8664_ksyms_64.c
28549@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28550 EXPORT_SYMBOL(copy_user_generic_unrolled);
28551 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28552 EXPORT_SYMBOL(__copy_user_nocache);
28553-EXPORT_SYMBOL(_copy_from_user);
28554-EXPORT_SYMBOL(_copy_to_user);
28555
28556 EXPORT_SYMBOL(copy_page);
28557 EXPORT_SYMBOL(clear_page);
28558@@ -79,3 +77,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28559 EXPORT_SYMBOL(___preempt_schedule_context);
28560 #endif
28561 #endif
28562+
28563+#ifdef CONFIG_PAX_PER_CPU_PGD
28564+EXPORT_SYMBOL(cpu_pgd);
28565+#endif
28566diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28567index 234b072..b7ab191 100644
28568--- a/arch/x86/kernel/x86_init.c
28569+++ b/arch/x86/kernel/x86_init.c
28570@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28571 static void default_nmi_init(void) { };
28572 static int default_i8042_detect(void) { return 1; };
28573
28574-struct x86_platform_ops x86_platform = {
28575+struct x86_platform_ops x86_platform __read_only = {
28576 .calibrate_tsc = native_calibrate_tsc,
28577 .get_wallclock = mach_get_cmos_time,
28578 .set_wallclock = mach_set_rtc_mmss,
28579@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28580 EXPORT_SYMBOL_GPL(x86_platform);
28581
28582 #if defined(CONFIG_PCI_MSI)
28583-struct x86_msi_ops x86_msi = {
28584+struct x86_msi_ops x86_msi __read_only = {
28585 .setup_msi_irqs = native_setup_msi_irqs,
28586 .compose_msi_msg = native_compose_msi_msg,
28587 .teardown_msi_irq = native_teardown_msi_irq,
28588@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28589 }
28590 #endif
28591
28592-struct x86_io_apic_ops x86_io_apic_ops = {
28593+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28594 .init = native_io_apic_init_mappings,
28595 .read = native_io_apic_read,
28596 .write = native_io_apic_write,
28597diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28598index cdc6cf9..e04f495 100644
28599--- a/arch/x86/kernel/xsave.c
28600+++ b/arch/x86/kernel/xsave.c
28601@@ -168,18 +168,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28602
28603 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28604 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28605- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28606+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28607
28608 if (!use_xsave())
28609 return err;
28610
28611- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28612+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28613
28614 /*
28615 * Read the xstate_bv which we copied (directly from the cpu or
28616 * from the state in task struct) to the user buffers.
28617 */
28618- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28619+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28620
28621 /*
28622 * For legacy compatible, we always set FP/SSE bits in the bit
28623@@ -194,7 +194,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28624 */
28625 xstate_bv |= XSTATE_FPSSE;
28626
28627- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28628+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28629
28630 return err;
28631 }
28632@@ -203,6 +203,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28633 {
28634 int err;
28635
28636+ buf = (struct xsave_struct __user *)____m(buf);
28637 if (use_xsave())
28638 err = xsave_user(buf);
28639 else if (use_fxsr())
28640@@ -313,6 +314,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28641 */
28642 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28643 {
28644+ buf = (void __user *)____m(buf);
28645 if (use_xsave()) {
28646 if ((unsigned long)buf % 64 || fx_only) {
28647 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28648diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28649index 8a80737..bac4961 100644
28650--- a/arch/x86/kvm/cpuid.c
28651+++ b/arch/x86/kvm/cpuid.c
28652@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28653 struct kvm_cpuid2 *cpuid,
28654 struct kvm_cpuid_entry2 __user *entries)
28655 {
28656- int r;
28657+ int r, i;
28658
28659 r = -E2BIG;
28660 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28661 goto out;
28662 r = -EFAULT;
28663- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28664- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28665+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28666 goto out;
28667+ for (i = 0; i < cpuid->nent; ++i) {
28668+ struct kvm_cpuid_entry2 cpuid_entry;
28669+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28670+ goto out;
28671+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28672+ }
28673 vcpu->arch.cpuid_nent = cpuid->nent;
28674 kvm_apic_set_version(vcpu);
28675 kvm_x86_ops->cpuid_update(vcpu);
28676@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28677 struct kvm_cpuid2 *cpuid,
28678 struct kvm_cpuid_entry2 __user *entries)
28679 {
28680- int r;
28681+ int r, i;
28682
28683 r = -E2BIG;
28684 if (cpuid->nent < vcpu->arch.cpuid_nent)
28685 goto out;
28686 r = -EFAULT;
28687- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28688- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28689+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28690 goto out;
28691+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28692+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28693+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28694+ goto out;
28695+ }
28696 return 0;
28697
28698 out:
28699diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28700index 106c015..2db7161 100644
28701--- a/arch/x86/kvm/emulate.c
28702+++ b/arch/x86/kvm/emulate.c
28703@@ -3572,7 +3572,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28704 int cr = ctxt->modrm_reg;
28705 u64 efer = 0;
28706
28707- static u64 cr_reserved_bits[] = {
28708+ static const u64 cr_reserved_bits[] = {
28709 0xffffffff00000000ULL,
28710 0, 0, 0, /* CR3 checked later */
28711 CR4_RESERVED_BITS,
28712diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28713index 4ee827d..a14eff9 100644
28714--- a/arch/x86/kvm/lapic.c
28715+++ b/arch/x86/kvm/lapic.c
28716@@ -56,7 +56,7 @@
28717 #define APIC_BUS_CYCLE_NS 1
28718
28719 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28720-#define apic_debug(fmt, arg...)
28721+#define apic_debug(fmt, arg...) do {} while (0)
28722
28723 #define APIC_LVT_NUM 6
28724 /* 14 is the version for Xeon and Pentium 8.4.8*/
28725diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28726index fd49c86..77e1aa0 100644
28727--- a/arch/x86/kvm/paging_tmpl.h
28728+++ b/arch/x86/kvm/paging_tmpl.h
28729@@ -343,7 +343,7 @@ retry_walk:
28730 if (unlikely(kvm_is_error_hva(host_addr)))
28731 goto error;
28732
28733- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28734+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28735 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28736 goto error;
28737 walker->ptep_user[walker->level - 1] = ptep_user;
28738diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28739index cc618c8..3f72f76 100644
28740--- a/arch/x86/kvm/svm.c
28741+++ b/arch/x86/kvm/svm.c
28742@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28743 int cpu = raw_smp_processor_id();
28744
28745 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28746+
28747+ pax_open_kernel();
28748 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28749+ pax_close_kernel();
28750+
28751 load_TR_desc();
28752 }
28753
28754@@ -3964,6 +3968,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28755 #endif
28756 #endif
28757
28758+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28759+ __set_fs(current_thread_info()->addr_limit);
28760+#endif
28761+
28762 reload_tss(vcpu);
28763
28764 local_irq_disable();
28765diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28766index a60bd3a..748e856 100644
28767--- a/arch/x86/kvm/vmx.c
28768+++ b/arch/x86/kvm/vmx.c
28769@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28770 #endif
28771 }
28772
28773-static void vmcs_clear_bits(unsigned long field, u32 mask)
28774+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28775 {
28776 vmcs_writel(field, vmcs_readl(field) & ~mask);
28777 }
28778
28779-static void vmcs_set_bits(unsigned long field, u32 mask)
28780+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28781 {
28782 vmcs_writel(field, vmcs_readl(field) | mask);
28783 }
28784@@ -1705,7 +1705,11 @@ static void reload_tss(void)
28785 struct desc_struct *descs;
28786
28787 descs = (void *)gdt->address;
28788+
28789+ pax_open_kernel();
28790 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28791+ pax_close_kernel();
28792+
28793 load_TR_desc();
28794 }
28795
28796@@ -1941,6 +1945,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28797 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28798 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28799
28800+#ifdef CONFIG_PAX_PER_CPU_PGD
28801+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28802+#endif
28803+
28804 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28805 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28806 vmx->loaded_vmcs->cpu = cpu;
28807@@ -2233,7 +2241,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28808 * reads and returns guest's timestamp counter "register"
28809 * guest_tsc = host_tsc + tsc_offset -- 21.3
28810 */
28811-static u64 guest_read_tsc(void)
28812+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28813 {
28814 u64 host_tsc, tsc_offset;
28815
28816@@ -4466,7 +4474,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28817 unsigned long cr4;
28818
28819 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28820+
28821+#ifndef CONFIG_PAX_PER_CPU_PGD
28822 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28823+#endif
28824
28825 /* Save the most likely value for this task's CR4 in the VMCS. */
28826 cr4 = cr4_read_shadow();
28827@@ -4493,7 +4504,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28828 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28829 vmx->host_idt_base = dt.address;
28830
28831- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28832+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28833
28834 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28835 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28836@@ -6104,11 +6115,17 @@ static __init int hardware_setup(void)
28837 * page upon invalidation. No need to do anything if not
28838 * using the APIC_ACCESS_ADDR VMCS field.
28839 */
28840- if (!flexpriority_enabled)
28841- kvm_x86_ops->set_apic_access_page_addr = NULL;
28842+ if (!flexpriority_enabled) {
28843+ pax_open_kernel();
28844+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28845+ pax_close_kernel();
28846+ }
28847
28848- if (!cpu_has_vmx_tpr_shadow())
28849- kvm_x86_ops->update_cr8_intercept = NULL;
28850+ if (!cpu_has_vmx_tpr_shadow()) {
28851+ pax_open_kernel();
28852+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28853+ pax_close_kernel();
28854+ }
28855
28856 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28857 kvm_disable_largepages();
28858@@ -6119,14 +6136,16 @@ static __init int hardware_setup(void)
28859 if (!cpu_has_vmx_apicv())
28860 enable_apicv = 0;
28861
28862+ pax_open_kernel();
28863 if (enable_apicv)
28864- kvm_x86_ops->update_cr8_intercept = NULL;
28865+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28866 else {
28867- kvm_x86_ops->hwapic_irr_update = NULL;
28868- kvm_x86_ops->hwapic_isr_update = NULL;
28869- kvm_x86_ops->deliver_posted_interrupt = NULL;
28870- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28871+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28872+ *(void **)&kvm_x86_ops->hwapic_isr_update = NULL;
28873+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28874+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28875 }
28876+ pax_close_kernel();
28877
28878 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
28879 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
28880@@ -6179,10 +6198,12 @@ static __init int hardware_setup(void)
28881 enable_pml = 0;
28882
28883 if (!enable_pml) {
28884- kvm_x86_ops->slot_enable_log_dirty = NULL;
28885- kvm_x86_ops->slot_disable_log_dirty = NULL;
28886- kvm_x86_ops->flush_log_dirty = NULL;
28887- kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28888+ pax_open_kernel();
28889+ *(void **)&kvm_x86_ops->slot_enable_log_dirty = NULL;
28890+ *(void **)&kvm_x86_ops->slot_disable_log_dirty = NULL;
28891+ *(void **)&kvm_x86_ops->flush_log_dirty = NULL;
28892+ *(void **)&kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28893+ pax_close_kernel();
28894 }
28895
28896 return alloc_kvm_area();
28897@@ -8227,6 +8248,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28898 "jmp 2f \n\t"
28899 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28900 "2: "
28901+
28902+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28903+ "ljmp %[cs],$3f\n\t"
28904+ "3: "
28905+#endif
28906+
28907 /* Save guest registers, load host registers, keep flags */
28908 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28909 "pop %0 \n\t"
28910@@ -8279,6 +8306,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28911 #endif
28912 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28913 [wordsize]"i"(sizeof(ulong))
28914+
28915+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28916+ ,[cs]"i"(__KERNEL_CS)
28917+#endif
28918+
28919 : "cc", "memory"
28920 #ifdef CONFIG_X86_64
28921 , "rax", "rbx", "rdi", "rsi"
28922@@ -8292,7 +8324,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28923 if (debugctlmsr)
28924 update_debugctlmsr(debugctlmsr);
28925
28926-#ifndef CONFIG_X86_64
28927+#ifdef CONFIG_X86_32
28928 /*
28929 * The sysexit path does not restore ds/es, so we must set them to
28930 * a reasonable value ourselves.
28931@@ -8301,8 +8333,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28932 * may be executed in interrupt context, which saves and restore segments
28933 * around it, nullifying its effect.
28934 */
28935- loadsegment(ds, __USER_DS);
28936- loadsegment(es, __USER_DS);
28937+ loadsegment(ds, __KERNEL_DS);
28938+ loadsegment(es, __KERNEL_DS);
28939+ loadsegment(ss, __KERNEL_DS);
28940+
28941+#ifdef CONFIG_PAX_KERNEXEC
28942+ loadsegment(fs, __KERNEL_PERCPU);
28943+#endif
28944+
28945+#ifdef CONFIG_PAX_MEMORY_UDEREF
28946+ __set_fs(current_thread_info()->addr_limit);
28947+#endif
28948+
28949 #endif
28950
28951 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
28952diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28953index e222ba5..6f0f2de 100644
28954--- a/arch/x86/kvm/x86.c
28955+++ b/arch/x86/kvm/x86.c
28956@@ -1897,8 +1897,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28957 {
28958 struct kvm *kvm = vcpu->kvm;
28959 int lm = is_long_mode(vcpu);
28960- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28961- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28962+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28963+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28964 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28965 : kvm->arch.xen_hvm_config.blob_size_32;
28966 u32 page_num = data & ~PAGE_MASK;
28967@@ -2835,6 +2835,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28968 if (n < msr_list.nmsrs)
28969 goto out;
28970 r = -EFAULT;
28971+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28972+ goto out;
28973 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28974 num_msrs_to_save * sizeof(u32)))
28975 goto out;
28976@@ -5739,7 +5741,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28977 };
28978 #endif
28979
28980-int kvm_arch_init(void *opaque)
28981+int kvm_arch_init(const void *opaque)
28982 {
28983 int r;
28984 struct kvm_x86_ops *ops = opaque;
28985diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28986index ac4453d..1f43bf3 100644
28987--- a/arch/x86/lguest/boot.c
28988+++ b/arch/x86/lguest/boot.c
28989@@ -1340,9 +1340,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28990 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28991 * Launcher to reboot us.
28992 */
28993-static void lguest_restart(char *reason)
28994+static __noreturn void lguest_restart(char *reason)
28995 {
28996 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28997+ BUG();
28998 }
28999
29000 /*G:050
29001diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29002index 00933d5..3a64af9 100644
29003--- a/arch/x86/lib/atomic64_386_32.S
29004+++ b/arch/x86/lib/atomic64_386_32.S
29005@@ -48,6 +48,10 @@ BEGIN(read)
29006 movl (v), %eax
29007 movl 4(v), %edx
29008 RET_ENDP
29009+BEGIN(read_unchecked)
29010+ movl (v), %eax
29011+ movl 4(v), %edx
29012+RET_ENDP
29013 #undef v
29014
29015 #define v %esi
29016@@ -55,6 +59,10 @@ BEGIN(set)
29017 movl %ebx, (v)
29018 movl %ecx, 4(v)
29019 RET_ENDP
29020+BEGIN(set_unchecked)
29021+ movl %ebx, (v)
29022+ movl %ecx, 4(v)
29023+RET_ENDP
29024 #undef v
29025
29026 #define v %esi
29027@@ -70,6 +78,20 @@ RET_ENDP
29028 BEGIN(add)
29029 addl %eax, (v)
29030 adcl %edx, 4(v)
29031+
29032+#ifdef CONFIG_PAX_REFCOUNT
29033+ jno 0f
29034+ subl %eax, (v)
29035+ sbbl %edx, 4(v)
29036+ int $4
29037+0:
29038+ _ASM_EXTABLE(0b, 0b)
29039+#endif
29040+
29041+RET_ENDP
29042+BEGIN(add_unchecked)
29043+ addl %eax, (v)
29044+ adcl %edx, 4(v)
29045 RET_ENDP
29046 #undef v
29047
29048@@ -77,6 +99,24 @@ RET_ENDP
29049 BEGIN(add_return)
29050 addl (v), %eax
29051 adcl 4(v), %edx
29052+
29053+#ifdef CONFIG_PAX_REFCOUNT
29054+ into
29055+1234:
29056+ _ASM_EXTABLE(1234b, 2f)
29057+#endif
29058+
29059+ movl %eax, (v)
29060+ movl %edx, 4(v)
29061+
29062+#ifdef CONFIG_PAX_REFCOUNT
29063+2:
29064+#endif
29065+
29066+RET_ENDP
29067+BEGIN(add_return_unchecked)
29068+ addl (v), %eax
29069+ adcl 4(v), %edx
29070 movl %eax, (v)
29071 movl %edx, 4(v)
29072 RET_ENDP
29073@@ -86,6 +126,20 @@ RET_ENDP
29074 BEGIN(sub)
29075 subl %eax, (v)
29076 sbbl %edx, 4(v)
29077+
29078+#ifdef CONFIG_PAX_REFCOUNT
29079+ jno 0f
29080+ addl %eax, (v)
29081+ adcl %edx, 4(v)
29082+ int $4
29083+0:
29084+ _ASM_EXTABLE(0b, 0b)
29085+#endif
29086+
29087+RET_ENDP
29088+BEGIN(sub_unchecked)
29089+ subl %eax, (v)
29090+ sbbl %edx, 4(v)
29091 RET_ENDP
29092 #undef v
29093
29094@@ -96,6 +150,27 @@ BEGIN(sub_return)
29095 sbbl $0, %edx
29096 addl (v), %eax
29097 adcl 4(v), %edx
29098+
29099+#ifdef CONFIG_PAX_REFCOUNT
29100+ into
29101+1234:
29102+ _ASM_EXTABLE(1234b, 2f)
29103+#endif
29104+
29105+ movl %eax, (v)
29106+ movl %edx, 4(v)
29107+
29108+#ifdef CONFIG_PAX_REFCOUNT
29109+2:
29110+#endif
29111+
29112+RET_ENDP
29113+BEGIN(sub_return_unchecked)
29114+ negl %edx
29115+ negl %eax
29116+ sbbl $0, %edx
29117+ addl (v), %eax
29118+ adcl 4(v), %edx
29119 movl %eax, (v)
29120 movl %edx, 4(v)
29121 RET_ENDP
29122@@ -105,6 +180,20 @@ RET_ENDP
29123 BEGIN(inc)
29124 addl $1, (v)
29125 adcl $0, 4(v)
29126+
29127+#ifdef CONFIG_PAX_REFCOUNT
29128+ jno 0f
29129+ subl $1, (v)
29130+ sbbl $0, 4(v)
29131+ int $4
29132+0:
29133+ _ASM_EXTABLE(0b, 0b)
29134+#endif
29135+
29136+RET_ENDP
29137+BEGIN(inc_unchecked)
29138+ addl $1, (v)
29139+ adcl $0, 4(v)
29140 RET_ENDP
29141 #undef v
29142
29143@@ -114,6 +203,26 @@ BEGIN(inc_return)
29144 movl 4(v), %edx
29145 addl $1, %eax
29146 adcl $0, %edx
29147+
29148+#ifdef CONFIG_PAX_REFCOUNT
29149+ into
29150+1234:
29151+ _ASM_EXTABLE(1234b, 2f)
29152+#endif
29153+
29154+ movl %eax, (v)
29155+ movl %edx, 4(v)
29156+
29157+#ifdef CONFIG_PAX_REFCOUNT
29158+2:
29159+#endif
29160+
29161+RET_ENDP
29162+BEGIN(inc_return_unchecked)
29163+ movl (v), %eax
29164+ movl 4(v), %edx
29165+ addl $1, %eax
29166+ adcl $0, %edx
29167 movl %eax, (v)
29168 movl %edx, 4(v)
29169 RET_ENDP
29170@@ -123,6 +232,20 @@ RET_ENDP
29171 BEGIN(dec)
29172 subl $1, (v)
29173 sbbl $0, 4(v)
29174+
29175+#ifdef CONFIG_PAX_REFCOUNT
29176+ jno 0f
29177+ addl $1, (v)
29178+ adcl $0, 4(v)
29179+ int $4
29180+0:
29181+ _ASM_EXTABLE(0b, 0b)
29182+#endif
29183+
29184+RET_ENDP
29185+BEGIN(dec_unchecked)
29186+ subl $1, (v)
29187+ sbbl $0, 4(v)
29188 RET_ENDP
29189 #undef v
29190
29191@@ -132,6 +255,26 @@ BEGIN(dec_return)
29192 movl 4(v), %edx
29193 subl $1, %eax
29194 sbbl $0, %edx
29195+
29196+#ifdef CONFIG_PAX_REFCOUNT
29197+ into
29198+1234:
29199+ _ASM_EXTABLE(1234b, 2f)
29200+#endif
29201+
29202+ movl %eax, (v)
29203+ movl %edx, 4(v)
29204+
29205+#ifdef CONFIG_PAX_REFCOUNT
29206+2:
29207+#endif
29208+
29209+RET_ENDP
29210+BEGIN(dec_return_unchecked)
29211+ movl (v), %eax
29212+ movl 4(v), %edx
29213+ subl $1, %eax
29214+ sbbl $0, %edx
29215 movl %eax, (v)
29216 movl %edx, 4(v)
29217 RET_ENDP
29218@@ -143,6 +286,13 @@ BEGIN(add_unless)
29219 adcl %edx, %edi
29220 addl (v), %eax
29221 adcl 4(v), %edx
29222+
29223+#ifdef CONFIG_PAX_REFCOUNT
29224+ into
29225+1234:
29226+ _ASM_EXTABLE(1234b, 2f)
29227+#endif
29228+
29229 cmpl %eax, %ecx
29230 je 3f
29231 1:
29232@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29233 1:
29234 addl $1, %eax
29235 adcl $0, %edx
29236+
29237+#ifdef CONFIG_PAX_REFCOUNT
29238+ into
29239+1234:
29240+ _ASM_EXTABLE(1234b, 2f)
29241+#endif
29242+
29243 movl %eax, (v)
29244 movl %edx, 4(v)
29245 movl $1, %eax
29246@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29247 movl 4(v), %edx
29248 subl $1, %eax
29249 sbbl $0, %edx
29250+
29251+#ifdef CONFIG_PAX_REFCOUNT
29252+ into
29253+1234:
29254+ _ASM_EXTABLE(1234b, 1f)
29255+#endif
29256+
29257 js 1f
29258 movl %eax, (v)
29259 movl %edx, 4(v)
29260diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29261index f5cc9eb..51fa319 100644
29262--- a/arch/x86/lib/atomic64_cx8_32.S
29263+++ b/arch/x86/lib/atomic64_cx8_32.S
29264@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29265 CFI_STARTPROC
29266
29267 read64 %ecx
29268+ pax_force_retaddr
29269 ret
29270 CFI_ENDPROC
29271 ENDPROC(atomic64_read_cx8)
29272
29273+ENTRY(atomic64_read_unchecked_cx8)
29274+ CFI_STARTPROC
29275+
29276+ read64 %ecx
29277+ pax_force_retaddr
29278+ ret
29279+ CFI_ENDPROC
29280+ENDPROC(atomic64_read_unchecked_cx8)
29281+
29282 ENTRY(atomic64_set_cx8)
29283 CFI_STARTPROC
29284
29285@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29286 cmpxchg8b (%esi)
29287 jne 1b
29288
29289+ pax_force_retaddr
29290 ret
29291 CFI_ENDPROC
29292 ENDPROC(atomic64_set_cx8)
29293
29294+ENTRY(atomic64_set_unchecked_cx8)
29295+ CFI_STARTPROC
29296+
29297+1:
29298+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29299+ * are atomic on 586 and newer */
29300+ cmpxchg8b (%esi)
29301+ jne 1b
29302+
29303+ pax_force_retaddr
29304+ ret
29305+ CFI_ENDPROC
29306+ENDPROC(atomic64_set_unchecked_cx8)
29307+
29308 ENTRY(atomic64_xchg_cx8)
29309 CFI_STARTPROC
29310
29311@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29312 cmpxchg8b (%esi)
29313 jne 1b
29314
29315+ pax_force_retaddr
29316 ret
29317 CFI_ENDPROC
29318 ENDPROC(atomic64_xchg_cx8)
29319
29320-.macro addsub_return func ins insc
29321-ENTRY(atomic64_\func\()_return_cx8)
29322+.macro addsub_return func ins insc unchecked=""
29323+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29324 CFI_STARTPROC
29325 SAVE ebp
29326 SAVE ebx
29327@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29328 movl %edx, %ecx
29329 \ins\()l %esi, %ebx
29330 \insc\()l %edi, %ecx
29331+
29332+.ifb \unchecked
29333+#ifdef CONFIG_PAX_REFCOUNT
29334+ into
29335+2:
29336+ _ASM_EXTABLE(2b, 3f)
29337+#endif
29338+.endif
29339+
29340 LOCK_PREFIX
29341 cmpxchg8b (%ebp)
29342 jne 1b
29343-
29344-10:
29345 movl %ebx, %eax
29346 movl %ecx, %edx
29347+
29348+.ifb \unchecked
29349+#ifdef CONFIG_PAX_REFCOUNT
29350+3:
29351+#endif
29352+.endif
29353+
29354 RESTORE edi
29355 RESTORE esi
29356 RESTORE ebx
29357 RESTORE ebp
29358+ pax_force_retaddr
29359 ret
29360 CFI_ENDPROC
29361-ENDPROC(atomic64_\func\()_return_cx8)
29362+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29363 .endm
29364
29365 addsub_return add add adc
29366 addsub_return sub sub sbb
29367+addsub_return add add adc _unchecked
29368+addsub_return sub sub sbb _unchecked
29369
29370-.macro incdec_return func ins insc
29371-ENTRY(atomic64_\func\()_return_cx8)
29372+.macro incdec_return func ins insc unchecked=""
29373+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29374 CFI_STARTPROC
29375 SAVE ebx
29376
29377@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29378 movl %edx, %ecx
29379 \ins\()l $1, %ebx
29380 \insc\()l $0, %ecx
29381+
29382+.ifb \unchecked
29383+#ifdef CONFIG_PAX_REFCOUNT
29384+ into
29385+2:
29386+ _ASM_EXTABLE(2b, 3f)
29387+#endif
29388+.endif
29389+
29390 LOCK_PREFIX
29391 cmpxchg8b (%esi)
29392 jne 1b
29393
29394-10:
29395 movl %ebx, %eax
29396 movl %ecx, %edx
29397+
29398+.ifb \unchecked
29399+#ifdef CONFIG_PAX_REFCOUNT
29400+3:
29401+#endif
29402+.endif
29403+
29404 RESTORE ebx
29405+ pax_force_retaddr
29406 ret
29407 CFI_ENDPROC
29408-ENDPROC(atomic64_\func\()_return_cx8)
29409+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29410 .endm
29411
29412 incdec_return inc add adc
29413 incdec_return dec sub sbb
29414+incdec_return inc add adc _unchecked
29415+incdec_return dec sub sbb _unchecked
29416
29417 ENTRY(atomic64_dec_if_positive_cx8)
29418 CFI_STARTPROC
29419@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29420 movl %edx, %ecx
29421 subl $1, %ebx
29422 sbb $0, %ecx
29423+
29424+#ifdef CONFIG_PAX_REFCOUNT
29425+ into
29426+1234:
29427+ _ASM_EXTABLE(1234b, 2f)
29428+#endif
29429+
29430 js 2f
29431 LOCK_PREFIX
29432 cmpxchg8b (%esi)
29433@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29434 movl %ebx, %eax
29435 movl %ecx, %edx
29436 RESTORE ebx
29437+ pax_force_retaddr
29438 ret
29439 CFI_ENDPROC
29440 ENDPROC(atomic64_dec_if_positive_cx8)
29441@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29442 movl %edx, %ecx
29443 addl %ebp, %ebx
29444 adcl %edi, %ecx
29445+
29446+#ifdef CONFIG_PAX_REFCOUNT
29447+ into
29448+1234:
29449+ _ASM_EXTABLE(1234b, 3f)
29450+#endif
29451+
29452 LOCK_PREFIX
29453 cmpxchg8b (%esi)
29454 jne 1b
29455@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29456 CFI_ADJUST_CFA_OFFSET -8
29457 RESTORE ebx
29458 RESTORE ebp
29459+ pax_force_retaddr
29460 ret
29461 4:
29462 cmpl %edx, 4(%esp)
29463@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29464 xorl %ecx, %ecx
29465 addl $1, %ebx
29466 adcl %edx, %ecx
29467+
29468+#ifdef CONFIG_PAX_REFCOUNT
29469+ into
29470+1234:
29471+ _ASM_EXTABLE(1234b, 3f)
29472+#endif
29473+
29474 LOCK_PREFIX
29475 cmpxchg8b (%esi)
29476 jne 1b
29477@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29478 movl $1, %eax
29479 3:
29480 RESTORE ebx
29481+ pax_force_retaddr
29482 ret
29483 CFI_ENDPROC
29484 ENDPROC(atomic64_inc_not_zero_cx8)
29485diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29486index e78b8eee..7e173a8 100644
29487--- a/arch/x86/lib/checksum_32.S
29488+++ b/arch/x86/lib/checksum_32.S
29489@@ -29,7 +29,8 @@
29490 #include <asm/dwarf2.h>
29491 #include <asm/errno.h>
29492 #include <asm/asm.h>
29493-
29494+#include <asm/segment.h>
29495+
29496 /*
29497 * computes a partial checksum, e.g. for TCP/UDP fragments
29498 */
29499@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29500
29501 #define ARGBASE 16
29502 #define FP 12
29503-
29504-ENTRY(csum_partial_copy_generic)
29505+
29506+ENTRY(csum_partial_copy_generic_to_user)
29507 CFI_STARTPROC
29508+
29509+#ifdef CONFIG_PAX_MEMORY_UDEREF
29510+ pushl_cfi %gs
29511+ popl_cfi %es
29512+ jmp csum_partial_copy_generic
29513+#endif
29514+
29515+ENTRY(csum_partial_copy_generic_from_user)
29516+
29517+#ifdef CONFIG_PAX_MEMORY_UDEREF
29518+ pushl_cfi %gs
29519+ popl_cfi %ds
29520+#endif
29521+
29522+ENTRY(csum_partial_copy_generic)
29523 subl $4,%esp
29524 CFI_ADJUST_CFA_OFFSET 4
29525 pushl_cfi %edi
29526@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29527 jmp 4f
29528 SRC(1: movw (%esi), %bx )
29529 addl $2, %esi
29530-DST( movw %bx, (%edi) )
29531+DST( movw %bx, %es:(%edi) )
29532 addl $2, %edi
29533 addw %bx, %ax
29534 adcl $0, %eax
29535@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29536 SRC(1: movl (%esi), %ebx )
29537 SRC( movl 4(%esi), %edx )
29538 adcl %ebx, %eax
29539-DST( movl %ebx, (%edi) )
29540+DST( movl %ebx, %es:(%edi) )
29541 adcl %edx, %eax
29542-DST( movl %edx, 4(%edi) )
29543+DST( movl %edx, %es:4(%edi) )
29544
29545 SRC( movl 8(%esi), %ebx )
29546 SRC( movl 12(%esi), %edx )
29547 adcl %ebx, %eax
29548-DST( movl %ebx, 8(%edi) )
29549+DST( movl %ebx, %es:8(%edi) )
29550 adcl %edx, %eax
29551-DST( movl %edx, 12(%edi) )
29552+DST( movl %edx, %es:12(%edi) )
29553
29554 SRC( movl 16(%esi), %ebx )
29555 SRC( movl 20(%esi), %edx )
29556 adcl %ebx, %eax
29557-DST( movl %ebx, 16(%edi) )
29558+DST( movl %ebx, %es:16(%edi) )
29559 adcl %edx, %eax
29560-DST( movl %edx, 20(%edi) )
29561+DST( movl %edx, %es:20(%edi) )
29562
29563 SRC( movl 24(%esi), %ebx )
29564 SRC( movl 28(%esi), %edx )
29565 adcl %ebx, %eax
29566-DST( movl %ebx, 24(%edi) )
29567+DST( movl %ebx, %es:24(%edi) )
29568 adcl %edx, %eax
29569-DST( movl %edx, 28(%edi) )
29570+DST( movl %edx, %es:28(%edi) )
29571
29572 lea 32(%esi), %esi
29573 lea 32(%edi), %edi
29574@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29575 shrl $2, %edx # This clears CF
29576 SRC(3: movl (%esi), %ebx )
29577 adcl %ebx, %eax
29578-DST( movl %ebx, (%edi) )
29579+DST( movl %ebx, %es:(%edi) )
29580 lea 4(%esi), %esi
29581 lea 4(%edi), %edi
29582 dec %edx
29583@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29584 jb 5f
29585 SRC( movw (%esi), %cx )
29586 leal 2(%esi), %esi
29587-DST( movw %cx, (%edi) )
29588+DST( movw %cx, %es:(%edi) )
29589 leal 2(%edi), %edi
29590 je 6f
29591 shll $16,%ecx
29592 SRC(5: movb (%esi), %cl )
29593-DST( movb %cl, (%edi) )
29594+DST( movb %cl, %es:(%edi) )
29595 6: addl %ecx, %eax
29596 adcl $0, %eax
29597 7:
29598@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29599
29600 6001:
29601 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29602- movl $-EFAULT, (%ebx)
29603+ movl $-EFAULT, %ss:(%ebx)
29604
29605 # zero the complete destination - computing the rest
29606 # is too much work
29607@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29608
29609 6002:
29610 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29611- movl $-EFAULT,(%ebx)
29612+ movl $-EFAULT,%ss:(%ebx)
29613 jmp 5000b
29614
29615 .previous
29616
29617+ pushl_cfi %ss
29618+ popl_cfi %ds
29619+ pushl_cfi %ss
29620+ popl_cfi %es
29621 popl_cfi %ebx
29622 CFI_RESTORE ebx
29623 popl_cfi %esi
29624@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29625 popl_cfi %ecx # equivalent to addl $4,%esp
29626 ret
29627 CFI_ENDPROC
29628-ENDPROC(csum_partial_copy_generic)
29629+ENDPROC(csum_partial_copy_generic_to_user)
29630
29631 #else
29632
29633 /* Version for PentiumII/PPro */
29634
29635 #define ROUND1(x) \
29636+ nop; nop; nop; \
29637 SRC(movl x(%esi), %ebx ) ; \
29638 addl %ebx, %eax ; \
29639- DST(movl %ebx, x(%edi) ) ;
29640+ DST(movl %ebx, %es:x(%edi)) ;
29641
29642 #define ROUND(x) \
29643+ nop; nop; nop; \
29644 SRC(movl x(%esi), %ebx ) ; \
29645 adcl %ebx, %eax ; \
29646- DST(movl %ebx, x(%edi) ) ;
29647+ DST(movl %ebx, %es:x(%edi)) ;
29648
29649 #define ARGBASE 12
29650-
29651-ENTRY(csum_partial_copy_generic)
29652+
29653+ENTRY(csum_partial_copy_generic_to_user)
29654 CFI_STARTPROC
29655+
29656+#ifdef CONFIG_PAX_MEMORY_UDEREF
29657+ pushl_cfi %gs
29658+ popl_cfi %es
29659+ jmp csum_partial_copy_generic
29660+#endif
29661+
29662+ENTRY(csum_partial_copy_generic_from_user)
29663+
29664+#ifdef CONFIG_PAX_MEMORY_UDEREF
29665+ pushl_cfi %gs
29666+ popl_cfi %ds
29667+#endif
29668+
29669+ENTRY(csum_partial_copy_generic)
29670 pushl_cfi %ebx
29671 CFI_REL_OFFSET ebx, 0
29672 pushl_cfi %edi
29673@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29674 subl %ebx, %edi
29675 lea -1(%esi),%edx
29676 andl $-32,%edx
29677- lea 3f(%ebx,%ebx), %ebx
29678+ lea 3f(%ebx,%ebx,2), %ebx
29679 testl %esi, %esi
29680 jmp *%ebx
29681 1: addl $64,%esi
29682@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29683 jb 5f
29684 SRC( movw (%esi), %dx )
29685 leal 2(%esi), %esi
29686-DST( movw %dx, (%edi) )
29687+DST( movw %dx, %es:(%edi) )
29688 leal 2(%edi), %edi
29689 je 6f
29690 shll $16,%edx
29691 5:
29692 SRC( movb (%esi), %dl )
29693-DST( movb %dl, (%edi) )
29694+DST( movb %dl, %es:(%edi) )
29695 6: addl %edx, %eax
29696 adcl $0, %eax
29697 7:
29698 .section .fixup, "ax"
29699 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29700- movl $-EFAULT, (%ebx)
29701+ movl $-EFAULT, %ss:(%ebx)
29702 # zero the complete destination (computing the rest is too much work)
29703 movl ARGBASE+8(%esp),%edi # dst
29704 movl ARGBASE+12(%esp),%ecx # len
29705@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29706 rep; stosb
29707 jmp 7b
29708 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29709- movl $-EFAULT, (%ebx)
29710+ movl $-EFAULT, %ss:(%ebx)
29711 jmp 7b
29712 .previous
29713
29714+#ifdef CONFIG_PAX_MEMORY_UDEREF
29715+ pushl_cfi %ss
29716+ popl_cfi %ds
29717+ pushl_cfi %ss
29718+ popl_cfi %es
29719+#endif
29720+
29721 popl_cfi %esi
29722 CFI_RESTORE esi
29723 popl_cfi %edi
29724@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29725 CFI_RESTORE ebx
29726 ret
29727 CFI_ENDPROC
29728-ENDPROC(csum_partial_copy_generic)
29729+ENDPROC(csum_partial_copy_generic_to_user)
29730
29731 #undef ROUND
29732 #undef ROUND1
29733diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29734index f2145cf..cea889d 100644
29735--- a/arch/x86/lib/clear_page_64.S
29736+++ b/arch/x86/lib/clear_page_64.S
29737@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29738 movl $4096/8,%ecx
29739 xorl %eax,%eax
29740 rep stosq
29741+ pax_force_retaddr
29742 ret
29743 CFI_ENDPROC
29744 ENDPROC(clear_page_c)
29745@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29746 movl $4096,%ecx
29747 xorl %eax,%eax
29748 rep stosb
29749+ pax_force_retaddr
29750 ret
29751 CFI_ENDPROC
29752 ENDPROC(clear_page_c_e)
29753@@ -43,6 +45,7 @@ ENTRY(clear_page)
29754 leaq 64(%rdi),%rdi
29755 jnz .Lloop
29756 nop
29757+ pax_force_retaddr
29758 ret
29759 CFI_ENDPROC
29760 .Lclear_page_end:
29761@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29762
29763 #include <asm/cpufeature.h>
29764
29765- .section .altinstr_replacement,"ax"
29766+ .section .altinstr_replacement,"a"
29767 1: .byte 0xeb /* jmp <disp8> */
29768 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29769 2: .byte 0xeb /* jmp <disp8> */
29770diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29771index 40a1725..5d12ac4 100644
29772--- a/arch/x86/lib/cmpxchg16b_emu.S
29773+++ b/arch/x86/lib/cmpxchg16b_emu.S
29774@@ -8,6 +8,7 @@
29775 #include <linux/linkage.h>
29776 #include <asm/dwarf2.h>
29777 #include <asm/percpu.h>
29778+#include <asm/alternative-asm.h>
29779
29780 .text
29781
29782@@ -46,12 +47,14 @@ CFI_STARTPROC
29783 CFI_REMEMBER_STATE
29784 popfq_cfi
29785 mov $1, %al
29786+ pax_force_retaddr
29787 ret
29788
29789 CFI_RESTORE_STATE
29790 .Lnot_same:
29791 popfq_cfi
29792 xor %al,%al
29793+ pax_force_retaddr
29794 ret
29795
29796 CFI_ENDPROC
29797diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29798index 176cca6..e0d658e 100644
29799--- a/arch/x86/lib/copy_page_64.S
29800+++ b/arch/x86/lib/copy_page_64.S
29801@@ -9,6 +9,7 @@ copy_page_rep:
29802 CFI_STARTPROC
29803 movl $4096/8, %ecx
29804 rep movsq
29805+ pax_force_retaddr
29806 ret
29807 CFI_ENDPROC
29808 ENDPROC(copy_page_rep)
29809@@ -24,8 +25,8 @@ ENTRY(copy_page)
29810 CFI_ADJUST_CFA_OFFSET 2*8
29811 movq %rbx, (%rsp)
29812 CFI_REL_OFFSET rbx, 0
29813- movq %r12, 1*8(%rsp)
29814- CFI_REL_OFFSET r12, 1*8
29815+ movq %r13, 1*8(%rsp)
29816+ CFI_REL_OFFSET r13, 1*8
29817
29818 movl $(4096/64)-5, %ecx
29819 .p2align 4
29820@@ -38,7 +39,7 @@ ENTRY(copy_page)
29821 movq 0x8*4(%rsi), %r9
29822 movq 0x8*5(%rsi), %r10
29823 movq 0x8*6(%rsi), %r11
29824- movq 0x8*7(%rsi), %r12
29825+ movq 0x8*7(%rsi), %r13
29826
29827 prefetcht0 5*64(%rsi)
29828
29829@@ -49,7 +50,7 @@ ENTRY(copy_page)
29830 movq %r9, 0x8*4(%rdi)
29831 movq %r10, 0x8*5(%rdi)
29832 movq %r11, 0x8*6(%rdi)
29833- movq %r12, 0x8*7(%rdi)
29834+ movq %r13, 0x8*7(%rdi)
29835
29836 leaq 64 (%rsi), %rsi
29837 leaq 64 (%rdi), %rdi
29838@@ -68,7 +69,7 @@ ENTRY(copy_page)
29839 movq 0x8*4(%rsi), %r9
29840 movq 0x8*5(%rsi), %r10
29841 movq 0x8*6(%rsi), %r11
29842- movq 0x8*7(%rsi), %r12
29843+ movq 0x8*7(%rsi), %r13
29844
29845 movq %rax, 0x8*0(%rdi)
29846 movq %rbx, 0x8*1(%rdi)
29847@@ -77,7 +78,7 @@ ENTRY(copy_page)
29848 movq %r9, 0x8*4(%rdi)
29849 movq %r10, 0x8*5(%rdi)
29850 movq %r11, 0x8*6(%rdi)
29851- movq %r12, 0x8*7(%rdi)
29852+ movq %r13, 0x8*7(%rdi)
29853
29854 leaq 64(%rdi), %rdi
29855 leaq 64(%rsi), %rsi
29856@@ -85,10 +86,11 @@ ENTRY(copy_page)
29857
29858 movq (%rsp), %rbx
29859 CFI_RESTORE rbx
29860- movq 1*8(%rsp), %r12
29861- CFI_RESTORE r12
29862+ movq 1*8(%rsp), %r13
29863+ CFI_RESTORE r13
29864 addq $2*8, %rsp
29865 CFI_ADJUST_CFA_OFFSET -2*8
29866+ pax_force_retaddr
29867 ret
29868 .Lcopy_page_end:
29869 CFI_ENDPROC
29870@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29871
29872 #include <asm/cpufeature.h>
29873
29874- .section .altinstr_replacement,"ax"
29875+ .section .altinstr_replacement,"a"
29876 1: .byte 0xeb /* jmp <disp8> */
29877 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29878 2:
29879diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29880index dee945d..a84067b 100644
29881--- a/arch/x86/lib/copy_user_64.S
29882+++ b/arch/x86/lib/copy_user_64.S
29883@@ -18,31 +18,7 @@
29884 #include <asm/alternative-asm.h>
29885 #include <asm/asm.h>
29886 #include <asm/smap.h>
29887-
29888-/*
29889- * By placing feature2 after feature1 in altinstructions section, we logically
29890- * implement:
29891- * If CPU has feature2, jmp to alt2 is used
29892- * else if CPU has feature1, jmp to alt1 is used
29893- * else jmp to orig is used.
29894- */
29895- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29896-0:
29897- .byte 0xe9 /* 32bit jump */
29898- .long \orig-1f /* by default jump to orig */
29899-1:
29900- .section .altinstr_replacement,"ax"
29901-2: .byte 0xe9 /* near jump with 32bit immediate */
29902- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29903-3: .byte 0xe9 /* near jump with 32bit immediate */
29904- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29905- .previous
29906-
29907- .section .altinstructions,"a"
29908- altinstruction_entry 0b,2b,\feature1,5,5
29909- altinstruction_entry 0b,3b,\feature2,5,5
29910- .previous
29911- .endm
29912+#include <asm/pgtable.h>
29913
29914 .macro ALIGN_DESTINATION
29915 #ifdef FIX_ALIGNMENT
29916@@ -70,52 +46,6 @@
29917 #endif
29918 .endm
29919
29920-/* Standard copy_to_user with segment limit checking */
29921-ENTRY(_copy_to_user)
29922- CFI_STARTPROC
29923- GET_THREAD_INFO(%rax)
29924- movq %rdi,%rcx
29925- addq %rdx,%rcx
29926- jc bad_to_user
29927- cmpq TI_addr_limit(%rax),%rcx
29928- ja bad_to_user
29929- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29930- copy_user_generic_unrolled,copy_user_generic_string, \
29931- copy_user_enhanced_fast_string
29932- CFI_ENDPROC
29933-ENDPROC(_copy_to_user)
29934-
29935-/* Standard copy_from_user with segment limit checking */
29936-ENTRY(_copy_from_user)
29937- CFI_STARTPROC
29938- GET_THREAD_INFO(%rax)
29939- movq %rsi,%rcx
29940- addq %rdx,%rcx
29941- jc bad_from_user
29942- cmpq TI_addr_limit(%rax),%rcx
29943- ja bad_from_user
29944- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29945- copy_user_generic_unrolled,copy_user_generic_string, \
29946- copy_user_enhanced_fast_string
29947- CFI_ENDPROC
29948-ENDPROC(_copy_from_user)
29949-
29950- .section .fixup,"ax"
29951- /* must zero dest */
29952-ENTRY(bad_from_user)
29953-bad_from_user:
29954- CFI_STARTPROC
29955- movl %edx,%ecx
29956- xorl %eax,%eax
29957- rep
29958- stosb
29959-bad_to_user:
29960- movl %edx,%eax
29961- ret
29962- CFI_ENDPROC
29963-ENDPROC(bad_from_user)
29964- .previous
29965-
29966 /*
29967 * copy_user_generic_unrolled - memory copy with exception handling.
29968 * This version is for CPUs like P4 that don't have efficient micro
29969@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29970 */
29971 ENTRY(copy_user_generic_unrolled)
29972 CFI_STARTPROC
29973+ ASM_PAX_OPEN_USERLAND
29974 ASM_STAC
29975 cmpl $8,%edx
29976 jb 20f /* less then 8 bytes, go to byte copy loop */
29977@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29978 jnz 21b
29979 23: xor %eax,%eax
29980 ASM_CLAC
29981+ ASM_PAX_CLOSE_USERLAND
29982+ pax_force_retaddr
29983 ret
29984
29985 .section .fixup,"ax"
29986@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29987 */
29988 ENTRY(copy_user_generic_string)
29989 CFI_STARTPROC
29990+ ASM_PAX_OPEN_USERLAND
29991 ASM_STAC
29992 cmpl $8,%edx
29993 jb 2f /* less than 8 bytes, go to byte copy loop */
29994@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29995 movsb
29996 xorl %eax,%eax
29997 ASM_CLAC
29998+ ASM_PAX_CLOSE_USERLAND
29999+ pax_force_retaddr
30000 ret
30001
30002 .section .fixup,"ax"
30003@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30004 */
30005 ENTRY(copy_user_enhanced_fast_string)
30006 CFI_STARTPROC
30007+ ASM_PAX_OPEN_USERLAND
30008 ASM_STAC
30009 movl %edx,%ecx
30010 1: rep
30011 movsb
30012 xorl %eax,%eax
30013 ASM_CLAC
30014+ ASM_PAX_CLOSE_USERLAND
30015+ pax_force_retaddr
30016 ret
30017
30018 .section .fixup,"ax"
30019diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30020index 6a4f43c..c70fb52 100644
30021--- a/arch/x86/lib/copy_user_nocache_64.S
30022+++ b/arch/x86/lib/copy_user_nocache_64.S
30023@@ -8,6 +8,7 @@
30024
30025 #include <linux/linkage.h>
30026 #include <asm/dwarf2.h>
30027+#include <asm/alternative-asm.h>
30028
30029 #define FIX_ALIGNMENT 1
30030
30031@@ -16,6 +17,7 @@
30032 #include <asm/thread_info.h>
30033 #include <asm/asm.h>
30034 #include <asm/smap.h>
30035+#include <asm/pgtable.h>
30036
30037 .macro ALIGN_DESTINATION
30038 #ifdef FIX_ALIGNMENT
30039@@ -49,6 +51,16 @@
30040 */
30041 ENTRY(__copy_user_nocache)
30042 CFI_STARTPROC
30043+
30044+#ifdef CONFIG_PAX_MEMORY_UDEREF
30045+ mov pax_user_shadow_base,%rcx
30046+ cmp %rcx,%rsi
30047+ jae 1f
30048+ add %rcx,%rsi
30049+1:
30050+#endif
30051+
30052+ ASM_PAX_OPEN_USERLAND
30053 ASM_STAC
30054 cmpl $8,%edx
30055 jb 20f /* less then 8 bytes, go to byte copy loop */
30056@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30057 jnz 21b
30058 23: xorl %eax,%eax
30059 ASM_CLAC
30060+ ASM_PAX_CLOSE_USERLAND
30061 sfence
30062+ pax_force_retaddr
30063 ret
30064
30065 .section .fixup,"ax"
30066diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30067index 2419d5f..fe52d0e 100644
30068--- a/arch/x86/lib/csum-copy_64.S
30069+++ b/arch/x86/lib/csum-copy_64.S
30070@@ -9,6 +9,7 @@
30071 #include <asm/dwarf2.h>
30072 #include <asm/errno.h>
30073 #include <asm/asm.h>
30074+#include <asm/alternative-asm.h>
30075
30076 /*
30077 * Checksum copy with exception handling.
30078@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30079 CFI_ADJUST_CFA_OFFSET 7*8
30080 movq %rbx, 2*8(%rsp)
30081 CFI_REL_OFFSET rbx, 2*8
30082- movq %r12, 3*8(%rsp)
30083- CFI_REL_OFFSET r12, 3*8
30084+ movq %r15, 3*8(%rsp)
30085+ CFI_REL_OFFSET r15, 3*8
30086 movq %r14, 4*8(%rsp)
30087 CFI_REL_OFFSET r14, 4*8
30088 movq %r13, 5*8(%rsp)
30089@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30090 movl %edx, %ecx
30091
30092 xorl %r9d, %r9d
30093- movq %rcx, %r12
30094+ movq %rcx, %r15
30095
30096- shrq $6, %r12
30097+ shrq $6, %r15
30098 jz .Lhandle_tail /* < 64 */
30099
30100 clc
30101
30102 /* main loop. clear in 64 byte blocks */
30103 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30104- /* r11: temp3, rdx: temp4, r12 loopcnt */
30105+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30106 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30107 .p2align 4
30108 .Lloop:
30109@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30110 adcq %r14, %rax
30111 adcq %r13, %rax
30112
30113- decl %r12d
30114+ decl %r15d
30115
30116 dest
30117 movq %rbx, (%rsi)
30118@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30119 .Lende:
30120 movq 2*8(%rsp), %rbx
30121 CFI_RESTORE rbx
30122- movq 3*8(%rsp), %r12
30123- CFI_RESTORE r12
30124+ movq 3*8(%rsp), %r15
30125+ CFI_RESTORE r15
30126 movq 4*8(%rsp), %r14
30127 CFI_RESTORE r14
30128 movq 5*8(%rsp), %r13
30129@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30130 CFI_RESTORE rbp
30131 addq $7*8, %rsp
30132 CFI_ADJUST_CFA_OFFSET -7*8
30133+ pax_force_retaddr
30134 ret
30135 CFI_RESTORE_STATE
30136
30137diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30138index 1318f75..44c30fd 100644
30139--- a/arch/x86/lib/csum-wrappers_64.c
30140+++ b/arch/x86/lib/csum-wrappers_64.c
30141@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30142 len -= 2;
30143 }
30144 }
30145+ pax_open_userland();
30146 stac();
30147- isum = csum_partial_copy_generic((__force const void *)src,
30148+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30149 dst, len, isum, errp, NULL);
30150 clac();
30151+ pax_close_userland();
30152 if (unlikely(*errp))
30153 goto out_err;
30154
30155@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30156 }
30157
30158 *errp = 0;
30159+ pax_open_userland();
30160 stac();
30161- ret = csum_partial_copy_generic(src, (void __force *)dst,
30162+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30163 len, isum, NULL, errp);
30164 clac();
30165+ pax_close_userland();
30166 return ret;
30167 }
30168 EXPORT_SYMBOL(csum_partial_copy_to_user);
30169diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30170index a451235..a74bfa3 100644
30171--- a/arch/x86/lib/getuser.S
30172+++ b/arch/x86/lib/getuser.S
30173@@ -33,17 +33,40 @@
30174 #include <asm/thread_info.h>
30175 #include <asm/asm.h>
30176 #include <asm/smap.h>
30177+#include <asm/segment.h>
30178+#include <asm/pgtable.h>
30179+#include <asm/alternative-asm.h>
30180+
30181+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30182+#define __copyuser_seg gs;
30183+#else
30184+#define __copyuser_seg
30185+#endif
30186
30187 .text
30188 ENTRY(__get_user_1)
30189 CFI_STARTPROC
30190+
30191+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30192 GET_THREAD_INFO(%_ASM_DX)
30193 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30194 jae bad_get_user
30195+
30196+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30197+ mov pax_user_shadow_base,%_ASM_DX
30198+ cmp %_ASM_DX,%_ASM_AX
30199+ jae 1234f
30200+ add %_ASM_DX,%_ASM_AX
30201+1234:
30202+#endif
30203+
30204+#endif
30205+
30206 ASM_STAC
30207-1: movzbl (%_ASM_AX),%edx
30208+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30209 xor %eax,%eax
30210 ASM_CLAC
30211+ pax_force_retaddr
30212 ret
30213 CFI_ENDPROC
30214 ENDPROC(__get_user_1)
30215@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30216 ENTRY(__get_user_2)
30217 CFI_STARTPROC
30218 add $1,%_ASM_AX
30219+
30220+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30221 jc bad_get_user
30222 GET_THREAD_INFO(%_ASM_DX)
30223 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30224 jae bad_get_user
30225+
30226+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30227+ mov pax_user_shadow_base,%_ASM_DX
30228+ cmp %_ASM_DX,%_ASM_AX
30229+ jae 1234f
30230+ add %_ASM_DX,%_ASM_AX
30231+1234:
30232+#endif
30233+
30234+#endif
30235+
30236 ASM_STAC
30237-2: movzwl -1(%_ASM_AX),%edx
30238+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30239 xor %eax,%eax
30240 ASM_CLAC
30241+ pax_force_retaddr
30242 ret
30243 CFI_ENDPROC
30244 ENDPROC(__get_user_2)
30245@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30246 ENTRY(__get_user_4)
30247 CFI_STARTPROC
30248 add $3,%_ASM_AX
30249+
30250+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30251 jc bad_get_user
30252 GET_THREAD_INFO(%_ASM_DX)
30253 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30254 jae bad_get_user
30255+
30256+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30257+ mov pax_user_shadow_base,%_ASM_DX
30258+ cmp %_ASM_DX,%_ASM_AX
30259+ jae 1234f
30260+ add %_ASM_DX,%_ASM_AX
30261+1234:
30262+#endif
30263+
30264+#endif
30265+
30266 ASM_STAC
30267-3: movl -3(%_ASM_AX),%edx
30268+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30269 xor %eax,%eax
30270 ASM_CLAC
30271+ pax_force_retaddr
30272 ret
30273 CFI_ENDPROC
30274 ENDPROC(__get_user_4)
30275@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30276 GET_THREAD_INFO(%_ASM_DX)
30277 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30278 jae bad_get_user
30279+
30280+#ifdef CONFIG_PAX_MEMORY_UDEREF
30281+ mov pax_user_shadow_base,%_ASM_DX
30282+ cmp %_ASM_DX,%_ASM_AX
30283+ jae 1234f
30284+ add %_ASM_DX,%_ASM_AX
30285+1234:
30286+#endif
30287+
30288 ASM_STAC
30289 4: movq -7(%_ASM_AX),%rdx
30290 xor %eax,%eax
30291 ASM_CLAC
30292+ pax_force_retaddr
30293 ret
30294 #else
30295 add $7,%_ASM_AX
30296@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30297 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30298 jae bad_get_user_8
30299 ASM_STAC
30300-4: movl -7(%_ASM_AX),%edx
30301-5: movl -3(%_ASM_AX),%ecx
30302+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30303+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30304 xor %eax,%eax
30305 ASM_CLAC
30306+ pax_force_retaddr
30307 ret
30308 #endif
30309 CFI_ENDPROC
30310@@ -113,6 +175,7 @@ bad_get_user:
30311 xor %edx,%edx
30312 mov $(-EFAULT),%_ASM_AX
30313 ASM_CLAC
30314+ pax_force_retaddr
30315 ret
30316 CFI_ENDPROC
30317 END(bad_get_user)
30318@@ -124,6 +187,7 @@ bad_get_user_8:
30319 xor %ecx,%ecx
30320 mov $(-EFAULT),%_ASM_AX
30321 ASM_CLAC
30322+ pax_force_retaddr
30323 ret
30324 CFI_ENDPROC
30325 END(bad_get_user_8)
30326diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30327index 85994f5..9929d7f 100644
30328--- a/arch/x86/lib/insn.c
30329+++ b/arch/x86/lib/insn.c
30330@@ -20,8 +20,10 @@
30331
30332 #ifdef __KERNEL__
30333 #include <linux/string.h>
30334+#include <asm/pgtable_types.h>
30335 #else
30336 #include <string.h>
30337+#define ktla_ktva(addr) addr
30338 #endif
30339 #include <asm/inat.h>
30340 #include <asm/insn.h>
30341@@ -60,9 +62,9 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30342 buf_len = MAX_INSN_SIZE;
30343
30344 memset(insn, 0, sizeof(*insn));
30345- insn->kaddr = kaddr;
30346- insn->end_kaddr = kaddr + buf_len;
30347- insn->next_byte = kaddr;
30348+ insn->kaddr = ktla_ktva(kaddr);
30349+ insn->end_kaddr = insn->kaddr + buf_len;
30350+ insn->next_byte = insn->kaddr;
30351 insn->x86_64 = x86_64 ? 1 : 0;
30352 insn->opnd_bytes = 4;
30353 if (x86_64)
30354diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30355index 05a95e7..326f2fa 100644
30356--- a/arch/x86/lib/iomap_copy_64.S
30357+++ b/arch/x86/lib/iomap_copy_64.S
30358@@ -17,6 +17,7 @@
30359
30360 #include <linux/linkage.h>
30361 #include <asm/dwarf2.h>
30362+#include <asm/alternative-asm.h>
30363
30364 /*
30365 * override generic version in lib/iomap_copy.c
30366@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30367 CFI_STARTPROC
30368 movl %edx,%ecx
30369 rep movsd
30370+ pax_force_retaddr
30371 ret
30372 CFI_ENDPROC
30373 ENDPROC(__iowrite32_copy)
30374diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30375index 89b53c9..97357ca 100644
30376--- a/arch/x86/lib/memcpy_64.S
30377+++ b/arch/x86/lib/memcpy_64.S
30378@@ -24,7 +24,7 @@
30379 * This gets patched over the unrolled variant (below) via the
30380 * alternative instructions framework:
30381 */
30382- .section .altinstr_replacement, "ax", @progbits
30383+ .section .altinstr_replacement, "a", @progbits
30384 .Lmemcpy_c:
30385 movq %rdi, %rax
30386 movq %rdx, %rcx
30387@@ -33,6 +33,7 @@
30388 rep movsq
30389 movl %edx, %ecx
30390 rep movsb
30391+ pax_force_retaddr
30392 ret
30393 .Lmemcpy_e:
30394 .previous
30395@@ -44,11 +45,12 @@
30396 * This gets patched over the unrolled variant (below) via the
30397 * alternative instructions framework:
30398 */
30399- .section .altinstr_replacement, "ax", @progbits
30400+ .section .altinstr_replacement, "a", @progbits
30401 .Lmemcpy_c_e:
30402 movq %rdi, %rax
30403 movq %rdx, %rcx
30404 rep movsb
30405+ pax_force_retaddr
30406 ret
30407 .Lmemcpy_e_e:
30408 .previous
30409@@ -138,6 +140,7 @@ ENTRY(memcpy)
30410 movq %r9, 1*8(%rdi)
30411 movq %r10, -2*8(%rdi, %rdx)
30412 movq %r11, -1*8(%rdi, %rdx)
30413+ pax_force_retaddr
30414 retq
30415 .p2align 4
30416 .Lless_16bytes:
30417@@ -150,6 +153,7 @@ ENTRY(memcpy)
30418 movq -1*8(%rsi, %rdx), %r9
30419 movq %r8, 0*8(%rdi)
30420 movq %r9, -1*8(%rdi, %rdx)
30421+ pax_force_retaddr
30422 retq
30423 .p2align 4
30424 .Lless_8bytes:
30425@@ -163,6 +167,7 @@ ENTRY(memcpy)
30426 movl -4(%rsi, %rdx), %r8d
30427 movl %ecx, (%rdi)
30428 movl %r8d, -4(%rdi, %rdx)
30429+ pax_force_retaddr
30430 retq
30431 .p2align 4
30432 .Lless_3bytes:
30433@@ -181,6 +186,7 @@ ENTRY(memcpy)
30434 movb %cl, (%rdi)
30435
30436 .Lend:
30437+ pax_force_retaddr
30438 retq
30439 CFI_ENDPROC
30440 ENDPROC(memcpy)
30441diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30442index 9c4b530..830b77a 100644
30443--- a/arch/x86/lib/memmove_64.S
30444+++ b/arch/x86/lib/memmove_64.S
30445@@ -205,14 +205,16 @@ ENTRY(__memmove)
30446 movb (%rsi), %r11b
30447 movb %r11b, (%rdi)
30448 13:
30449+ pax_force_retaddr
30450 retq
30451 CFI_ENDPROC
30452
30453- .section .altinstr_replacement,"ax"
30454+ .section .altinstr_replacement,"a"
30455 .Lmemmove_begin_forward_efs:
30456 /* Forward moving data. */
30457 movq %rdx, %rcx
30458 rep movsb
30459+ pax_force_retaddr
30460 retq
30461 .Lmemmove_end_forward_efs:
30462 .previous
30463diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30464index 6f44935..fbf5f6d 100644
30465--- a/arch/x86/lib/memset_64.S
30466+++ b/arch/x86/lib/memset_64.S
30467@@ -16,7 +16,7 @@
30468 *
30469 * rax original destination
30470 */
30471- .section .altinstr_replacement, "ax", @progbits
30472+ .section .altinstr_replacement, "a", @progbits
30473 .Lmemset_c:
30474 movq %rdi,%r9
30475 movq %rdx,%rcx
30476@@ -30,6 +30,7 @@
30477 movl %edx,%ecx
30478 rep stosb
30479 movq %r9,%rax
30480+ pax_force_retaddr
30481 ret
30482 .Lmemset_e:
30483 .previous
30484@@ -45,13 +46,14 @@
30485 *
30486 * rax original destination
30487 */
30488- .section .altinstr_replacement, "ax", @progbits
30489+ .section .altinstr_replacement, "a", @progbits
30490 .Lmemset_c_e:
30491 movq %rdi,%r9
30492 movb %sil,%al
30493 movq %rdx,%rcx
30494 rep stosb
30495 movq %r9,%rax
30496+ pax_force_retaddr
30497 ret
30498 .Lmemset_e_e:
30499 .previous
30500@@ -120,6 +122,7 @@ ENTRY(__memset)
30501
30502 .Lende:
30503 movq %r10,%rax
30504+ pax_force_retaddr
30505 ret
30506
30507 CFI_RESTORE_STATE
30508diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30509index c9f2d9b..e7fd2c0 100644
30510--- a/arch/x86/lib/mmx_32.c
30511+++ b/arch/x86/lib/mmx_32.c
30512@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30513 {
30514 void *p;
30515 int i;
30516+ unsigned long cr0;
30517
30518 if (unlikely(in_interrupt()))
30519 return __memcpy(to, from, len);
30520@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30521 kernel_fpu_begin();
30522
30523 __asm__ __volatile__ (
30524- "1: prefetch (%0)\n" /* This set is 28 bytes */
30525- " prefetch 64(%0)\n"
30526- " prefetch 128(%0)\n"
30527- " prefetch 192(%0)\n"
30528- " prefetch 256(%0)\n"
30529+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30530+ " prefetch 64(%1)\n"
30531+ " prefetch 128(%1)\n"
30532+ " prefetch 192(%1)\n"
30533+ " prefetch 256(%1)\n"
30534 "2: \n"
30535 ".section .fixup, \"ax\"\n"
30536- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30537+ "3: \n"
30538+
30539+#ifdef CONFIG_PAX_KERNEXEC
30540+ " movl %%cr0, %0\n"
30541+ " movl %0, %%eax\n"
30542+ " andl $0xFFFEFFFF, %%eax\n"
30543+ " movl %%eax, %%cr0\n"
30544+#endif
30545+
30546+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30547+
30548+#ifdef CONFIG_PAX_KERNEXEC
30549+ " movl %0, %%cr0\n"
30550+#endif
30551+
30552 " jmp 2b\n"
30553 ".previous\n"
30554 _ASM_EXTABLE(1b, 3b)
30555- : : "r" (from));
30556+ : "=&r" (cr0) : "r" (from) : "ax");
30557
30558 for ( ; i > 5; i--) {
30559 __asm__ __volatile__ (
30560- "1: prefetch 320(%0)\n"
30561- "2: movq (%0), %%mm0\n"
30562- " movq 8(%0), %%mm1\n"
30563- " movq 16(%0), %%mm2\n"
30564- " movq 24(%0), %%mm3\n"
30565- " movq %%mm0, (%1)\n"
30566- " movq %%mm1, 8(%1)\n"
30567- " movq %%mm2, 16(%1)\n"
30568- " movq %%mm3, 24(%1)\n"
30569- " movq 32(%0), %%mm0\n"
30570- " movq 40(%0), %%mm1\n"
30571- " movq 48(%0), %%mm2\n"
30572- " movq 56(%0), %%mm3\n"
30573- " movq %%mm0, 32(%1)\n"
30574- " movq %%mm1, 40(%1)\n"
30575- " movq %%mm2, 48(%1)\n"
30576- " movq %%mm3, 56(%1)\n"
30577+ "1: prefetch 320(%1)\n"
30578+ "2: movq (%1), %%mm0\n"
30579+ " movq 8(%1), %%mm1\n"
30580+ " movq 16(%1), %%mm2\n"
30581+ " movq 24(%1), %%mm3\n"
30582+ " movq %%mm0, (%2)\n"
30583+ " movq %%mm1, 8(%2)\n"
30584+ " movq %%mm2, 16(%2)\n"
30585+ " movq %%mm3, 24(%2)\n"
30586+ " movq 32(%1), %%mm0\n"
30587+ " movq 40(%1), %%mm1\n"
30588+ " movq 48(%1), %%mm2\n"
30589+ " movq 56(%1), %%mm3\n"
30590+ " movq %%mm0, 32(%2)\n"
30591+ " movq %%mm1, 40(%2)\n"
30592+ " movq %%mm2, 48(%2)\n"
30593+ " movq %%mm3, 56(%2)\n"
30594 ".section .fixup, \"ax\"\n"
30595- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30596+ "3:\n"
30597+
30598+#ifdef CONFIG_PAX_KERNEXEC
30599+ " movl %%cr0, %0\n"
30600+ " movl %0, %%eax\n"
30601+ " andl $0xFFFEFFFF, %%eax\n"
30602+ " movl %%eax, %%cr0\n"
30603+#endif
30604+
30605+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30606+
30607+#ifdef CONFIG_PAX_KERNEXEC
30608+ " movl %0, %%cr0\n"
30609+#endif
30610+
30611 " jmp 2b\n"
30612 ".previous\n"
30613 _ASM_EXTABLE(1b, 3b)
30614- : : "r" (from), "r" (to) : "memory");
30615+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30616
30617 from += 64;
30618 to += 64;
30619@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30620 static void fast_copy_page(void *to, void *from)
30621 {
30622 int i;
30623+ unsigned long cr0;
30624
30625 kernel_fpu_begin();
30626
30627@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30628 * but that is for later. -AV
30629 */
30630 __asm__ __volatile__(
30631- "1: prefetch (%0)\n"
30632- " prefetch 64(%0)\n"
30633- " prefetch 128(%0)\n"
30634- " prefetch 192(%0)\n"
30635- " prefetch 256(%0)\n"
30636+ "1: prefetch (%1)\n"
30637+ " prefetch 64(%1)\n"
30638+ " prefetch 128(%1)\n"
30639+ " prefetch 192(%1)\n"
30640+ " prefetch 256(%1)\n"
30641 "2: \n"
30642 ".section .fixup, \"ax\"\n"
30643- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30644+ "3: \n"
30645+
30646+#ifdef CONFIG_PAX_KERNEXEC
30647+ " movl %%cr0, %0\n"
30648+ " movl %0, %%eax\n"
30649+ " andl $0xFFFEFFFF, %%eax\n"
30650+ " movl %%eax, %%cr0\n"
30651+#endif
30652+
30653+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30654+
30655+#ifdef CONFIG_PAX_KERNEXEC
30656+ " movl %0, %%cr0\n"
30657+#endif
30658+
30659 " jmp 2b\n"
30660 ".previous\n"
30661- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30662+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30663
30664 for (i = 0; i < (4096-320)/64; i++) {
30665 __asm__ __volatile__ (
30666- "1: prefetch 320(%0)\n"
30667- "2: movq (%0), %%mm0\n"
30668- " movntq %%mm0, (%1)\n"
30669- " movq 8(%0), %%mm1\n"
30670- " movntq %%mm1, 8(%1)\n"
30671- " movq 16(%0), %%mm2\n"
30672- " movntq %%mm2, 16(%1)\n"
30673- " movq 24(%0), %%mm3\n"
30674- " movntq %%mm3, 24(%1)\n"
30675- " movq 32(%0), %%mm4\n"
30676- " movntq %%mm4, 32(%1)\n"
30677- " movq 40(%0), %%mm5\n"
30678- " movntq %%mm5, 40(%1)\n"
30679- " movq 48(%0), %%mm6\n"
30680- " movntq %%mm6, 48(%1)\n"
30681- " movq 56(%0), %%mm7\n"
30682- " movntq %%mm7, 56(%1)\n"
30683+ "1: prefetch 320(%1)\n"
30684+ "2: movq (%1), %%mm0\n"
30685+ " movntq %%mm0, (%2)\n"
30686+ " movq 8(%1), %%mm1\n"
30687+ " movntq %%mm1, 8(%2)\n"
30688+ " movq 16(%1), %%mm2\n"
30689+ " movntq %%mm2, 16(%2)\n"
30690+ " movq 24(%1), %%mm3\n"
30691+ " movntq %%mm3, 24(%2)\n"
30692+ " movq 32(%1), %%mm4\n"
30693+ " movntq %%mm4, 32(%2)\n"
30694+ " movq 40(%1), %%mm5\n"
30695+ " movntq %%mm5, 40(%2)\n"
30696+ " movq 48(%1), %%mm6\n"
30697+ " movntq %%mm6, 48(%2)\n"
30698+ " movq 56(%1), %%mm7\n"
30699+ " movntq %%mm7, 56(%2)\n"
30700 ".section .fixup, \"ax\"\n"
30701- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30702+ "3:\n"
30703+
30704+#ifdef CONFIG_PAX_KERNEXEC
30705+ " movl %%cr0, %0\n"
30706+ " movl %0, %%eax\n"
30707+ " andl $0xFFFEFFFF, %%eax\n"
30708+ " movl %%eax, %%cr0\n"
30709+#endif
30710+
30711+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30712+
30713+#ifdef CONFIG_PAX_KERNEXEC
30714+ " movl %0, %%cr0\n"
30715+#endif
30716+
30717 " jmp 2b\n"
30718 ".previous\n"
30719- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30720+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30721
30722 from += 64;
30723 to += 64;
30724@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30725 static void fast_copy_page(void *to, void *from)
30726 {
30727 int i;
30728+ unsigned long cr0;
30729
30730 kernel_fpu_begin();
30731
30732 __asm__ __volatile__ (
30733- "1: prefetch (%0)\n"
30734- " prefetch 64(%0)\n"
30735- " prefetch 128(%0)\n"
30736- " prefetch 192(%0)\n"
30737- " prefetch 256(%0)\n"
30738+ "1: prefetch (%1)\n"
30739+ " prefetch 64(%1)\n"
30740+ " prefetch 128(%1)\n"
30741+ " prefetch 192(%1)\n"
30742+ " prefetch 256(%1)\n"
30743 "2: \n"
30744 ".section .fixup, \"ax\"\n"
30745- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30746+ "3: \n"
30747+
30748+#ifdef CONFIG_PAX_KERNEXEC
30749+ " movl %%cr0, %0\n"
30750+ " movl %0, %%eax\n"
30751+ " andl $0xFFFEFFFF, %%eax\n"
30752+ " movl %%eax, %%cr0\n"
30753+#endif
30754+
30755+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30756+
30757+#ifdef CONFIG_PAX_KERNEXEC
30758+ " movl %0, %%cr0\n"
30759+#endif
30760+
30761 " jmp 2b\n"
30762 ".previous\n"
30763- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30764+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30765
30766 for (i = 0; i < 4096/64; i++) {
30767 __asm__ __volatile__ (
30768- "1: prefetch 320(%0)\n"
30769- "2: movq (%0), %%mm0\n"
30770- " movq 8(%0), %%mm1\n"
30771- " movq 16(%0), %%mm2\n"
30772- " movq 24(%0), %%mm3\n"
30773- " movq %%mm0, (%1)\n"
30774- " movq %%mm1, 8(%1)\n"
30775- " movq %%mm2, 16(%1)\n"
30776- " movq %%mm3, 24(%1)\n"
30777- " movq 32(%0), %%mm0\n"
30778- " movq 40(%0), %%mm1\n"
30779- " movq 48(%0), %%mm2\n"
30780- " movq 56(%0), %%mm3\n"
30781- " movq %%mm0, 32(%1)\n"
30782- " movq %%mm1, 40(%1)\n"
30783- " movq %%mm2, 48(%1)\n"
30784- " movq %%mm3, 56(%1)\n"
30785+ "1: prefetch 320(%1)\n"
30786+ "2: movq (%1), %%mm0\n"
30787+ " movq 8(%1), %%mm1\n"
30788+ " movq 16(%1), %%mm2\n"
30789+ " movq 24(%1), %%mm3\n"
30790+ " movq %%mm0, (%2)\n"
30791+ " movq %%mm1, 8(%2)\n"
30792+ " movq %%mm2, 16(%2)\n"
30793+ " movq %%mm3, 24(%2)\n"
30794+ " movq 32(%1), %%mm0\n"
30795+ " movq 40(%1), %%mm1\n"
30796+ " movq 48(%1), %%mm2\n"
30797+ " movq 56(%1), %%mm3\n"
30798+ " movq %%mm0, 32(%2)\n"
30799+ " movq %%mm1, 40(%2)\n"
30800+ " movq %%mm2, 48(%2)\n"
30801+ " movq %%mm3, 56(%2)\n"
30802 ".section .fixup, \"ax\"\n"
30803- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30804+ "3:\n"
30805+
30806+#ifdef CONFIG_PAX_KERNEXEC
30807+ " movl %%cr0, %0\n"
30808+ " movl %0, %%eax\n"
30809+ " andl $0xFFFEFFFF, %%eax\n"
30810+ " movl %%eax, %%cr0\n"
30811+#endif
30812+
30813+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30814+
30815+#ifdef CONFIG_PAX_KERNEXEC
30816+ " movl %0, %%cr0\n"
30817+#endif
30818+
30819 " jmp 2b\n"
30820 ".previous\n"
30821 _ASM_EXTABLE(1b, 3b)
30822- : : "r" (from), "r" (to) : "memory");
30823+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30824
30825 from += 64;
30826 to += 64;
30827diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30828index f6d13ee..d789440 100644
30829--- a/arch/x86/lib/msr-reg.S
30830+++ b/arch/x86/lib/msr-reg.S
30831@@ -3,6 +3,7 @@
30832 #include <asm/dwarf2.h>
30833 #include <asm/asm.h>
30834 #include <asm/msr.h>
30835+#include <asm/alternative-asm.h>
30836
30837 #ifdef CONFIG_X86_64
30838 /*
30839@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30840 movl %edi, 28(%r10)
30841 popq_cfi %rbp
30842 popq_cfi %rbx
30843+ pax_force_retaddr
30844 ret
30845 3:
30846 CFI_RESTORE_STATE
30847diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30848index fc6ba17..14ad9a5 100644
30849--- a/arch/x86/lib/putuser.S
30850+++ b/arch/x86/lib/putuser.S
30851@@ -16,7 +16,9 @@
30852 #include <asm/errno.h>
30853 #include <asm/asm.h>
30854 #include <asm/smap.h>
30855-
30856+#include <asm/segment.h>
30857+#include <asm/pgtable.h>
30858+#include <asm/alternative-asm.h>
30859
30860 /*
30861 * __put_user_X
30862@@ -30,57 +32,125 @@
30863 * as they get called from within inline assembly.
30864 */
30865
30866-#define ENTER CFI_STARTPROC ; \
30867- GET_THREAD_INFO(%_ASM_BX)
30868-#define EXIT ASM_CLAC ; \
30869- ret ; \
30870+#define ENTER CFI_STARTPROC
30871+#define EXIT ASM_CLAC ; \
30872+ pax_force_retaddr ; \
30873+ ret ; \
30874 CFI_ENDPROC
30875
30876+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30877+#define _DEST %_ASM_CX,%_ASM_BX
30878+#else
30879+#define _DEST %_ASM_CX
30880+#endif
30881+
30882+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30883+#define __copyuser_seg gs;
30884+#else
30885+#define __copyuser_seg
30886+#endif
30887+
30888 .text
30889 ENTRY(__put_user_1)
30890 ENTER
30891+
30892+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30893+ GET_THREAD_INFO(%_ASM_BX)
30894 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30895 jae bad_put_user
30896+
30897+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30898+ mov pax_user_shadow_base,%_ASM_BX
30899+ cmp %_ASM_BX,%_ASM_CX
30900+ jb 1234f
30901+ xor %ebx,%ebx
30902+1234:
30903+#endif
30904+
30905+#endif
30906+
30907 ASM_STAC
30908-1: movb %al,(%_ASM_CX)
30909+1: __copyuser_seg movb %al,(_DEST)
30910 xor %eax,%eax
30911 EXIT
30912 ENDPROC(__put_user_1)
30913
30914 ENTRY(__put_user_2)
30915 ENTER
30916+
30917+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30918+ GET_THREAD_INFO(%_ASM_BX)
30919 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30920 sub $1,%_ASM_BX
30921 cmp %_ASM_BX,%_ASM_CX
30922 jae bad_put_user
30923+
30924+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30925+ mov pax_user_shadow_base,%_ASM_BX
30926+ cmp %_ASM_BX,%_ASM_CX
30927+ jb 1234f
30928+ xor %ebx,%ebx
30929+1234:
30930+#endif
30931+
30932+#endif
30933+
30934 ASM_STAC
30935-2: movw %ax,(%_ASM_CX)
30936+2: __copyuser_seg movw %ax,(_DEST)
30937 xor %eax,%eax
30938 EXIT
30939 ENDPROC(__put_user_2)
30940
30941 ENTRY(__put_user_4)
30942 ENTER
30943+
30944+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30945+ GET_THREAD_INFO(%_ASM_BX)
30946 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30947 sub $3,%_ASM_BX
30948 cmp %_ASM_BX,%_ASM_CX
30949 jae bad_put_user
30950+
30951+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30952+ mov pax_user_shadow_base,%_ASM_BX
30953+ cmp %_ASM_BX,%_ASM_CX
30954+ jb 1234f
30955+ xor %ebx,%ebx
30956+1234:
30957+#endif
30958+
30959+#endif
30960+
30961 ASM_STAC
30962-3: movl %eax,(%_ASM_CX)
30963+3: __copyuser_seg movl %eax,(_DEST)
30964 xor %eax,%eax
30965 EXIT
30966 ENDPROC(__put_user_4)
30967
30968 ENTRY(__put_user_8)
30969 ENTER
30970+
30971+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30972+ GET_THREAD_INFO(%_ASM_BX)
30973 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30974 sub $7,%_ASM_BX
30975 cmp %_ASM_BX,%_ASM_CX
30976 jae bad_put_user
30977+
30978+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30979+ mov pax_user_shadow_base,%_ASM_BX
30980+ cmp %_ASM_BX,%_ASM_CX
30981+ jb 1234f
30982+ xor %ebx,%ebx
30983+1234:
30984+#endif
30985+
30986+#endif
30987+
30988 ASM_STAC
30989-4: mov %_ASM_AX,(%_ASM_CX)
30990+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30991 #ifdef CONFIG_X86_32
30992-5: movl %edx,4(%_ASM_CX)
30993+5: __copyuser_seg movl %edx,4(_DEST)
30994 #endif
30995 xor %eax,%eax
30996 EXIT
30997diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30998index 5dff5f0..cadebf4 100644
30999--- a/arch/x86/lib/rwsem.S
31000+++ b/arch/x86/lib/rwsem.S
31001@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31002 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31003 CFI_RESTORE __ASM_REG(dx)
31004 restore_common_regs
31005+ pax_force_retaddr
31006 ret
31007 CFI_ENDPROC
31008 ENDPROC(call_rwsem_down_read_failed)
31009@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31010 movq %rax,%rdi
31011 call rwsem_down_write_failed
31012 restore_common_regs
31013+ pax_force_retaddr
31014 ret
31015 CFI_ENDPROC
31016 ENDPROC(call_rwsem_down_write_failed)
31017@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31018 movq %rax,%rdi
31019 call rwsem_wake
31020 restore_common_regs
31021-1: ret
31022+1: pax_force_retaddr
31023+ ret
31024 CFI_ENDPROC
31025 ENDPROC(call_rwsem_wake)
31026
31027@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31028 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31029 CFI_RESTORE __ASM_REG(dx)
31030 restore_common_regs
31031+ pax_force_retaddr
31032 ret
31033 CFI_ENDPROC
31034 ENDPROC(call_rwsem_downgrade_wake)
31035diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31036index b30b5eb..2b57052 100644
31037--- a/arch/x86/lib/thunk_64.S
31038+++ b/arch/x86/lib/thunk_64.S
31039@@ -9,6 +9,7 @@
31040 #include <asm/dwarf2.h>
31041 #include <asm/calling.h>
31042 #include <asm/asm.h>
31043+#include <asm/alternative-asm.h>
31044
31045 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31046 .macro THUNK name, func, put_ret_addr_in_rdi=0
31047@@ -16,11 +17,11 @@
31048 \name:
31049 CFI_STARTPROC
31050
31051- /* this one pushes 9 elems, the next one would be %rIP */
31052- SAVE_ARGS
31053+ /* this one pushes 15+1 elems, the next one would be %rIP */
31054+ SAVE_ARGS 8
31055
31056 .if \put_ret_addr_in_rdi
31057- movq_cfi_restore 9*8, rdi
31058+ movq_cfi_restore RIP, rdi
31059 .endif
31060
31061 call \func
31062@@ -47,9 +48,10 @@
31063
31064 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31065 CFI_STARTPROC
31066- SAVE_ARGS
31067+ SAVE_ARGS 8
31068 restore:
31069- RESTORE_ARGS
31070+ RESTORE_ARGS 1,8
31071+ pax_force_retaddr
31072 ret
31073 CFI_ENDPROC
31074 _ASM_NOKPROBE(restore)
31075diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31076index e2f5e21..4b22130 100644
31077--- a/arch/x86/lib/usercopy_32.c
31078+++ b/arch/x86/lib/usercopy_32.c
31079@@ -42,11 +42,13 @@ do { \
31080 int __d0; \
31081 might_fault(); \
31082 __asm__ __volatile__( \
31083+ __COPYUSER_SET_ES \
31084 ASM_STAC "\n" \
31085 "0: rep; stosl\n" \
31086 " movl %2,%0\n" \
31087 "1: rep; stosb\n" \
31088 "2: " ASM_CLAC "\n" \
31089+ __COPYUSER_RESTORE_ES \
31090 ".section .fixup,\"ax\"\n" \
31091 "3: lea 0(%2,%0,4),%0\n" \
31092 " jmp 2b\n" \
31093@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31094
31095 #ifdef CONFIG_X86_INTEL_USERCOPY
31096 static unsigned long
31097-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31098+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31099 {
31100 int d0, d1;
31101 __asm__ __volatile__(
31102@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31103 " .align 2,0x90\n"
31104 "3: movl 0(%4), %%eax\n"
31105 "4: movl 4(%4), %%edx\n"
31106- "5: movl %%eax, 0(%3)\n"
31107- "6: movl %%edx, 4(%3)\n"
31108+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31109+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31110 "7: movl 8(%4), %%eax\n"
31111 "8: movl 12(%4),%%edx\n"
31112- "9: movl %%eax, 8(%3)\n"
31113- "10: movl %%edx, 12(%3)\n"
31114+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31115+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31116 "11: movl 16(%4), %%eax\n"
31117 "12: movl 20(%4), %%edx\n"
31118- "13: movl %%eax, 16(%3)\n"
31119- "14: movl %%edx, 20(%3)\n"
31120+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31121+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31122 "15: movl 24(%4), %%eax\n"
31123 "16: movl 28(%4), %%edx\n"
31124- "17: movl %%eax, 24(%3)\n"
31125- "18: movl %%edx, 28(%3)\n"
31126+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31127+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31128 "19: movl 32(%4), %%eax\n"
31129 "20: movl 36(%4), %%edx\n"
31130- "21: movl %%eax, 32(%3)\n"
31131- "22: movl %%edx, 36(%3)\n"
31132+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31133+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31134 "23: movl 40(%4), %%eax\n"
31135 "24: movl 44(%4), %%edx\n"
31136- "25: movl %%eax, 40(%3)\n"
31137- "26: movl %%edx, 44(%3)\n"
31138+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31139+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31140 "27: movl 48(%4), %%eax\n"
31141 "28: movl 52(%4), %%edx\n"
31142- "29: movl %%eax, 48(%3)\n"
31143- "30: movl %%edx, 52(%3)\n"
31144+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31145+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31146 "31: movl 56(%4), %%eax\n"
31147 "32: movl 60(%4), %%edx\n"
31148- "33: movl %%eax, 56(%3)\n"
31149- "34: movl %%edx, 60(%3)\n"
31150+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31151+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31152 " addl $-64, %0\n"
31153 " addl $64, %4\n"
31154 " addl $64, %3\n"
31155@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31156 " shrl $2, %0\n"
31157 " andl $3, %%eax\n"
31158 " cld\n"
31159+ __COPYUSER_SET_ES
31160 "99: rep; movsl\n"
31161 "36: movl %%eax, %0\n"
31162 "37: rep; movsb\n"
31163 "100:\n"
31164+ __COPYUSER_RESTORE_ES
31165+ ".section .fixup,\"ax\"\n"
31166+ "101: lea 0(%%eax,%0,4),%0\n"
31167+ " jmp 100b\n"
31168+ ".previous\n"
31169+ _ASM_EXTABLE(1b,100b)
31170+ _ASM_EXTABLE(2b,100b)
31171+ _ASM_EXTABLE(3b,100b)
31172+ _ASM_EXTABLE(4b,100b)
31173+ _ASM_EXTABLE(5b,100b)
31174+ _ASM_EXTABLE(6b,100b)
31175+ _ASM_EXTABLE(7b,100b)
31176+ _ASM_EXTABLE(8b,100b)
31177+ _ASM_EXTABLE(9b,100b)
31178+ _ASM_EXTABLE(10b,100b)
31179+ _ASM_EXTABLE(11b,100b)
31180+ _ASM_EXTABLE(12b,100b)
31181+ _ASM_EXTABLE(13b,100b)
31182+ _ASM_EXTABLE(14b,100b)
31183+ _ASM_EXTABLE(15b,100b)
31184+ _ASM_EXTABLE(16b,100b)
31185+ _ASM_EXTABLE(17b,100b)
31186+ _ASM_EXTABLE(18b,100b)
31187+ _ASM_EXTABLE(19b,100b)
31188+ _ASM_EXTABLE(20b,100b)
31189+ _ASM_EXTABLE(21b,100b)
31190+ _ASM_EXTABLE(22b,100b)
31191+ _ASM_EXTABLE(23b,100b)
31192+ _ASM_EXTABLE(24b,100b)
31193+ _ASM_EXTABLE(25b,100b)
31194+ _ASM_EXTABLE(26b,100b)
31195+ _ASM_EXTABLE(27b,100b)
31196+ _ASM_EXTABLE(28b,100b)
31197+ _ASM_EXTABLE(29b,100b)
31198+ _ASM_EXTABLE(30b,100b)
31199+ _ASM_EXTABLE(31b,100b)
31200+ _ASM_EXTABLE(32b,100b)
31201+ _ASM_EXTABLE(33b,100b)
31202+ _ASM_EXTABLE(34b,100b)
31203+ _ASM_EXTABLE(35b,100b)
31204+ _ASM_EXTABLE(36b,100b)
31205+ _ASM_EXTABLE(37b,100b)
31206+ _ASM_EXTABLE(99b,101b)
31207+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31208+ : "1"(to), "2"(from), "0"(size)
31209+ : "eax", "edx", "memory");
31210+ return size;
31211+}
31212+
31213+static unsigned long
31214+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31215+{
31216+ int d0, d1;
31217+ __asm__ __volatile__(
31218+ " .align 2,0x90\n"
31219+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31220+ " cmpl $67, %0\n"
31221+ " jbe 3f\n"
31222+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31223+ " .align 2,0x90\n"
31224+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31225+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31226+ "5: movl %%eax, 0(%3)\n"
31227+ "6: movl %%edx, 4(%3)\n"
31228+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31229+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31230+ "9: movl %%eax, 8(%3)\n"
31231+ "10: movl %%edx, 12(%3)\n"
31232+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31233+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31234+ "13: movl %%eax, 16(%3)\n"
31235+ "14: movl %%edx, 20(%3)\n"
31236+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31237+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31238+ "17: movl %%eax, 24(%3)\n"
31239+ "18: movl %%edx, 28(%3)\n"
31240+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31241+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31242+ "21: movl %%eax, 32(%3)\n"
31243+ "22: movl %%edx, 36(%3)\n"
31244+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31245+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31246+ "25: movl %%eax, 40(%3)\n"
31247+ "26: movl %%edx, 44(%3)\n"
31248+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31249+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31250+ "29: movl %%eax, 48(%3)\n"
31251+ "30: movl %%edx, 52(%3)\n"
31252+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31253+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31254+ "33: movl %%eax, 56(%3)\n"
31255+ "34: movl %%edx, 60(%3)\n"
31256+ " addl $-64, %0\n"
31257+ " addl $64, %4\n"
31258+ " addl $64, %3\n"
31259+ " cmpl $63, %0\n"
31260+ " ja 1b\n"
31261+ "35: movl %0, %%eax\n"
31262+ " shrl $2, %0\n"
31263+ " andl $3, %%eax\n"
31264+ " cld\n"
31265+ "99: rep; "__copyuser_seg" movsl\n"
31266+ "36: movl %%eax, %0\n"
31267+ "37: rep; "__copyuser_seg" movsb\n"
31268+ "100:\n"
31269 ".section .fixup,\"ax\"\n"
31270 "101: lea 0(%%eax,%0,4),%0\n"
31271 " jmp 100b\n"
31272@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31273 int d0, d1;
31274 __asm__ __volatile__(
31275 " .align 2,0x90\n"
31276- "0: movl 32(%4), %%eax\n"
31277+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31278 " cmpl $67, %0\n"
31279 " jbe 2f\n"
31280- "1: movl 64(%4), %%eax\n"
31281+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31282 " .align 2,0x90\n"
31283- "2: movl 0(%4), %%eax\n"
31284- "21: movl 4(%4), %%edx\n"
31285+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31286+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31287 " movl %%eax, 0(%3)\n"
31288 " movl %%edx, 4(%3)\n"
31289- "3: movl 8(%4), %%eax\n"
31290- "31: movl 12(%4),%%edx\n"
31291+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31292+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31293 " movl %%eax, 8(%3)\n"
31294 " movl %%edx, 12(%3)\n"
31295- "4: movl 16(%4), %%eax\n"
31296- "41: movl 20(%4), %%edx\n"
31297+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31298+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31299 " movl %%eax, 16(%3)\n"
31300 " movl %%edx, 20(%3)\n"
31301- "10: movl 24(%4), %%eax\n"
31302- "51: movl 28(%4), %%edx\n"
31303+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31304+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31305 " movl %%eax, 24(%3)\n"
31306 " movl %%edx, 28(%3)\n"
31307- "11: movl 32(%4), %%eax\n"
31308- "61: movl 36(%4), %%edx\n"
31309+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31310+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31311 " movl %%eax, 32(%3)\n"
31312 " movl %%edx, 36(%3)\n"
31313- "12: movl 40(%4), %%eax\n"
31314- "71: movl 44(%4), %%edx\n"
31315+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31316+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31317 " movl %%eax, 40(%3)\n"
31318 " movl %%edx, 44(%3)\n"
31319- "13: movl 48(%4), %%eax\n"
31320- "81: movl 52(%4), %%edx\n"
31321+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31322+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31323 " movl %%eax, 48(%3)\n"
31324 " movl %%edx, 52(%3)\n"
31325- "14: movl 56(%4), %%eax\n"
31326- "91: movl 60(%4), %%edx\n"
31327+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31328+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31329 " movl %%eax, 56(%3)\n"
31330 " movl %%edx, 60(%3)\n"
31331 " addl $-64, %0\n"
31332@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31333 " shrl $2, %0\n"
31334 " andl $3, %%eax\n"
31335 " cld\n"
31336- "6: rep; movsl\n"
31337+ "6: rep; "__copyuser_seg" movsl\n"
31338 " movl %%eax,%0\n"
31339- "7: rep; movsb\n"
31340+ "7: rep; "__copyuser_seg" movsb\n"
31341 "8:\n"
31342 ".section .fixup,\"ax\"\n"
31343 "9: lea 0(%%eax,%0,4),%0\n"
31344@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31345
31346 __asm__ __volatile__(
31347 " .align 2,0x90\n"
31348- "0: movl 32(%4), %%eax\n"
31349+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31350 " cmpl $67, %0\n"
31351 " jbe 2f\n"
31352- "1: movl 64(%4), %%eax\n"
31353+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31354 " .align 2,0x90\n"
31355- "2: movl 0(%4), %%eax\n"
31356- "21: movl 4(%4), %%edx\n"
31357+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31358+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31359 " movnti %%eax, 0(%3)\n"
31360 " movnti %%edx, 4(%3)\n"
31361- "3: movl 8(%4), %%eax\n"
31362- "31: movl 12(%4),%%edx\n"
31363+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31364+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31365 " movnti %%eax, 8(%3)\n"
31366 " movnti %%edx, 12(%3)\n"
31367- "4: movl 16(%4), %%eax\n"
31368- "41: movl 20(%4), %%edx\n"
31369+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31370+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31371 " movnti %%eax, 16(%3)\n"
31372 " movnti %%edx, 20(%3)\n"
31373- "10: movl 24(%4), %%eax\n"
31374- "51: movl 28(%4), %%edx\n"
31375+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31376+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31377 " movnti %%eax, 24(%3)\n"
31378 " movnti %%edx, 28(%3)\n"
31379- "11: movl 32(%4), %%eax\n"
31380- "61: movl 36(%4), %%edx\n"
31381+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31382+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31383 " movnti %%eax, 32(%3)\n"
31384 " movnti %%edx, 36(%3)\n"
31385- "12: movl 40(%4), %%eax\n"
31386- "71: movl 44(%4), %%edx\n"
31387+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31388+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31389 " movnti %%eax, 40(%3)\n"
31390 " movnti %%edx, 44(%3)\n"
31391- "13: movl 48(%4), %%eax\n"
31392- "81: movl 52(%4), %%edx\n"
31393+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31394+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31395 " movnti %%eax, 48(%3)\n"
31396 " movnti %%edx, 52(%3)\n"
31397- "14: movl 56(%4), %%eax\n"
31398- "91: movl 60(%4), %%edx\n"
31399+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31400+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31401 " movnti %%eax, 56(%3)\n"
31402 " movnti %%edx, 60(%3)\n"
31403 " addl $-64, %0\n"
31404@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31405 " shrl $2, %0\n"
31406 " andl $3, %%eax\n"
31407 " cld\n"
31408- "6: rep; movsl\n"
31409+ "6: rep; "__copyuser_seg" movsl\n"
31410 " movl %%eax,%0\n"
31411- "7: rep; movsb\n"
31412+ "7: rep; "__copyuser_seg" movsb\n"
31413 "8:\n"
31414 ".section .fixup,\"ax\"\n"
31415 "9: lea 0(%%eax,%0,4),%0\n"
31416@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31417
31418 __asm__ __volatile__(
31419 " .align 2,0x90\n"
31420- "0: movl 32(%4), %%eax\n"
31421+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31422 " cmpl $67, %0\n"
31423 " jbe 2f\n"
31424- "1: movl 64(%4), %%eax\n"
31425+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31426 " .align 2,0x90\n"
31427- "2: movl 0(%4), %%eax\n"
31428- "21: movl 4(%4), %%edx\n"
31429+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31430+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31431 " movnti %%eax, 0(%3)\n"
31432 " movnti %%edx, 4(%3)\n"
31433- "3: movl 8(%4), %%eax\n"
31434- "31: movl 12(%4),%%edx\n"
31435+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31436+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31437 " movnti %%eax, 8(%3)\n"
31438 " movnti %%edx, 12(%3)\n"
31439- "4: movl 16(%4), %%eax\n"
31440- "41: movl 20(%4), %%edx\n"
31441+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31442+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31443 " movnti %%eax, 16(%3)\n"
31444 " movnti %%edx, 20(%3)\n"
31445- "10: movl 24(%4), %%eax\n"
31446- "51: movl 28(%4), %%edx\n"
31447+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31448+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31449 " movnti %%eax, 24(%3)\n"
31450 " movnti %%edx, 28(%3)\n"
31451- "11: movl 32(%4), %%eax\n"
31452- "61: movl 36(%4), %%edx\n"
31453+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31454+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31455 " movnti %%eax, 32(%3)\n"
31456 " movnti %%edx, 36(%3)\n"
31457- "12: movl 40(%4), %%eax\n"
31458- "71: movl 44(%4), %%edx\n"
31459+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31460+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31461 " movnti %%eax, 40(%3)\n"
31462 " movnti %%edx, 44(%3)\n"
31463- "13: movl 48(%4), %%eax\n"
31464- "81: movl 52(%4), %%edx\n"
31465+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31466+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31467 " movnti %%eax, 48(%3)\n"
31468 " movnti %%edx, 52(%3)\n"
31469- "14: movl 56(%4), %%eax\n"
31470- "91: movl 60(%4), %%edx\n"
31471+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31472+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31473 " movnti %%eax, 56(%3)\n"
31474 " movnti %%edx, 60(%3)\n"
31475 " addl $-64, %0\n"
31476@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31477 " shrl $2, %0\n"
31478 " andl $3, %%eax\n"
31479 " cld\n"
31480- "6: rep; movsl\n"
31481+ "6: rep; "__copyuser_seg" movsl\n"
31482 " movl %%eax,%0\n"
31483- "7: rep; movsb\n"
31484+ "7: rep; "__copyuser_seg" movsb\n"
31485 "8:\n"
31486 ".section .fixup,\"ax\"\n"
31487 "9: lea 0(%%eax,%0,4),%0\n"
31488@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31489 */
31490 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31491 unsigned long size);
31492-unsigned long __copy_user_intel(void __user *to, const void *from,
31493+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31494+ unsigned long size);
31495+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31496 unsigned long size);
31497 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31498 const void __user *from, unsigned long size);
31499 #endif /* CONFIG_X86_INTEL_USERCOPY */
31500
31501 /* Generic arbitrary sized copy. */
31502-#define __copy_user(to, from, size) \
31503+#define __copy_user(to, from, size, prefix, set, restore) \
31504 do { \
31505 int __d0, __d1, __d2; \
31506 __asm__ __volatile__( \
31507+ set \
31508 " cmp $7,%0\n" \
31509 " jbe 1f\n" \
31510 " movl %1,%0\n" \
31511 " negl %0\n" \
31512 " andl $7,%0\n" \
31513 " subl %0,%3\n" \
31514- "4: rep; movsb\n" \
31515+ "4: rep; "prefix"movsb\n" \
31516 " movl %3,%0\n" \
31517 " shrl $2,%0\n" \
31518 " andl $3,%3\n" \
31519 " .align 2,0x90\n" \
31520- "0: rep; movsl\n" \
31521+ "0: rep; "prefix"movsl\n" \
31522 " movl %3,%0\n" \
31523- "1: rep; movsb\n" \
31524+ "1: rep; "prefix"movsb\n" \
31525 "2:\n" \
31526+ restore \
31527 ".section .fixup,\"ax\"\n" \
31528 "5: addl %3,%0\n" \
31529 " jmp 2b\n" \
31530@@ -538,14 +650,14 @@ do { \
31531 " negl %0\n" \
31532 " andl $7,%0\n" \
31533 " subl %0,%3\n" \
31534- "4: rep; movsb\n" \
31535+ "4: rep; "__copyuser_seg"movsb\n" \
31536 " movl %3,%0\n" \
31537 " shrl $2,%0\n" \
31538 " andl $3,%3\n" \
31539 " .align 2,0x90\n" \
31540- "0: rep; movsl\n" \
31541+ "0: rep; "__copyuser_seg"movsl\n" \
31542 " movl %3,%0\n" \
31543- "1: rep; movsb\n" \
31544+ "1: rep; "__copyuser_seg"movsb\n" \
31545 "2:\n" \
31546 ".section .fixup,\"ax\"\n" \
31547 "5: addl %3,%0\n" \
31548@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31549 {
31550 stac();
31551 if (movsl_is_ok(to, from, n))
31552- __copy_user(to, from, n);
31553+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31554 else
31555- n = __copy_user_intel(to, from, n);
31556+ n = __generic_copy_to_user_intel(to, from, n);
31557 clac();
31558 return n;
31559 }
31560@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31561 {
31562 stac();
31563 if (movsl_is_ok(to, from, n))
31564- __copy_user(to, from, n);
31565+ __copy_user(to, from, n, __copyuser_seg, "", "");
31566 else
31567- n = __copy_user_intel((void __user *)to,
31568- (const void *)from, n);
31569+ n = __generic_copy_from_user_intel(to, from, n);
31570 clac();
31571 return n;
31572 }
31573@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31574 if (n > 64 && cpu_has_xmm2)
31575 n = __copy_user_intel_nocache(to, from, n);
31576 else
31577- __copy_user(to, from, n);
31578+ __copy_user(to, from, n, __copyuser_seg, "", "");
31579 #else
31580- __copy_user(to, from, n);
31581+ __copy_user(to, from, n, __copyuser_seg, "", "");
31582 #endif
31583 clac();
31584 return n;
31585 }
31586 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31587
31588-/**
31589- * copy_to_user: - Copy a block of data into user space.
31590- * @to: Destination address, in user space.
31591- * @from: Source address, in kernel space.
31592- * @n: Number of bytes to copy.
31593- *
31594- * Context: User context only. This function may sleep.
31595- *
31596- * Copy data from kernel space to user space.
31597- *
31598- * Returns number of bytes that could not be copied.
31599- * On success, this will be zero.
31600- */
31601-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31602+#ifdef CONFIG_PAX_MEMORY_UDEREF
31603+void __set_fs(mm_segment_t x)
31604 {
31605- if (access_ok(VERIFY_WRITE, to, n))
31606- n = __copy_to_user(to, from, n);
31607- return n;
31608+ switch (x.seg) {
31609+ case 0:
31610+ loadsegment(gs, 0);
31611+ break;
31612+ case TASK_SIZE_MAX:
31613+ loadsegment(gs, __USER_DS);
31614+ break;
31615+ case -1UL:
31616+ loadsegment(gs, __KERNEL_DS);
31617+ break;
31618+ default:
31619+ BUG();
31620+ }
31621 }
31622-EXPORT_SYMBOL(_copy_to_user);
31623+EXPORT_SYMBOL(__set_fs);
31624
31625-/**
31626- * copy_from_user: - Copy a block of data from user space.
31627- * @to: Destination address, in kernel space.
31628- * @from: Source address, in user space.
31629- * @n: Number of bytes to copy.
31630- *
31631- * Context: User context only. This function may sleep.
31632- *
31633- * Copy data from user space to kernel space.
31634- *
31635- * Returns number of bytes that could not be copied.
31636- * On success, this will be zero.
31637- *
31638- * If some data could not be copied, this function will pad the copied
31639- * data to the requested size using zero bytes.
31640- */
31641-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31642+void set_fs(mm_segment_t x)
31643 {
31644- if (access_ok(VERIFY_READ, from, n))
31645- n = __copy_from_user(to, from, n);
31646- else
31647- memset(to, 0, n);
31648- return n;
31649+ current_thread_info()->addr_limit = x;
31650+ __set_fs(x);
31651 }
31652-EXPORT_SYMBOL(_copy_from_user);
31653+EXPORT_SYMBOL(set_fs);
31654+#endif
31655diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31656index 0a42327..7a82465 100644
31657--- a/arch/x86/lib/usercopy_64.c
31658+++ b/arch/x86/lib/usercopy_64.c
31659@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31660 might_fault();
31661 /* no memory constraint because it doesn't change any memory gcc knows
31662 about */
31663+ pax_open_userland();
31664 stac();
31665 asm volatile(
31666 " testq %[size8],%[size8]\n"
31667@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31668 _ASM_EXTABLE(0b,3b)
31669 _ASM_EXTABLE(1b,2b)
31670 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31671- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31672+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31673 [zero] "r" (0UL), [eight] "r" (8UL));
31674 clac();
31675+ pax_close_userland();
31676 return size;
31677 }
31678 EXPORT_SYMBOL(__clear_user);
31679@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31680 }
31681 EXPORT_SYMBOL(clear_user);
31682
31683-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31684+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31685 {
31686- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31687- return copy_user_generic((__force void *)to, (__force void *)from, len);
31688- }
31689- return len;
31690+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31691+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31692+ return len;
31693 }
31694 EXPORT_SYMBOL(copy_in_user);
31695
31696@@ -69,8 +70,10 @@ EXPORT_SYMBOL(copy_in_user);
31697 * it is not necessary to optimize tail handling.
31698 */
31699 __visible unsigned long
31700-copy_user_handle_tail(char *to, char *from, unsigned len)
31701+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len)
31702 {
31703+ clac();
31704+ pax_close_userland();
31705 for (; len; --len, to++) {
31706 char c;
31707
31708@@ -79,10 +82,9 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
31709 if (__put_user_nocheck(c, to, sizeof(char)))
31710 break;
31711 }
31712- clac();
31713
31714 /* If the destination is a kernel buffer, we always clear the end */
31715- if (!__addr_ok(to))
31716+ if (!__addr_ok(to) && (unsigned long)to >= TASK_SIZE_MAX + pax_user_shadow_base)
31717 memset(to, 0, len);
31718 return len;
31719 }
31720diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31721index c4cc740..60a7362 100644
31722--- a/arch/x86/mm/Makefile
31723+++ b/arch/x86/mm/Makefile
31724@@ -35,3 +35,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31725 obj-$(CONFIG_MEMTEST) += memtest.o
31726
31727 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31728+
31729+quote:="
31730+obj-$(CONFIG_X86_64) += uderef_64.o
31731+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31732diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31733index 903ec1e..c4166b2 100644
31734--- a/arch/x86/mm/extable.c
31735+++ b/arch/x86/mm/extable.c
31736@@ -6,12 +6,24 @@
31737 static inline unsigned long
31738 ex_insn_addr(const struct exception_table_entry *x)
31739 {
31740- return (unsigned long)&x->insn + x->insn;
31741+ unsigned long reloc = 0;
31742+
31743+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31744+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31745+#endif
31746+
31747+ return (unsigned long)&x->insn + x->insn + reloc;
31748 }
31749 static inline unsigned long
31750 ex_fixup_addr(const struct exception_table_entry *x)
31751 {
31752- return (unsigned long)&x->fixup + x->fixup;
31753+ unsigned long reloc = 0;
31754+
31755+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31756+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31757+#endif
31758+
31759+ return (unsigned long)&x->fixup + x->fixup + reloc;
31760 }
31761
31762 int fixup_exception(struct pt_regs *regs)
31763@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31764 unsigned long new_ip;
31765
31766 #ifdef CONFIG_PNPBIOS
31767- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31768+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31769 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31770 extern u32 pnp_bios_is_utter_crap;
31771 pnp_bios_is_utter_crap = 1;
31772@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31773 i += 4;
31774 p->fixup -= i;
31775 i += 4;
31776+
31777+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31778+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31779+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31780+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31781+#endif
31782+
31783 }
31784 }
31785
31786diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31787index ede025f..380466b 100644
31788--- a/arch/x86/mm/fault.c
31789+++ b/arch/x86/mm/fault.c
31790@@ -13,12 +13,19 @@
31791 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31792 #include <linux/prefetch.h> /* prefetchw */
31793 #include <linux/context_tracking.h> /* exception_enter(), ... */
31794+#include <linux/unistd.h>
31795+#include <linux/compiler.h>
31796
31797 #include <asm/traps.h> /* dotraplinkage, ... */
31798 #include <asm/pgalloc.h> /* pgd_*(), ... */
31799 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31800 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31801 #include <asm/vsyscall.h> /* emulate_vsyscall */
31802+#include <asm/tlbflush.h>
31803+
31804+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31805+#include <asm/stacktrace.h>
31806+#endif
31807
31808 #define CREATE_TRACE_POINTS
31809 #include <asm/trace/exceptions.h>
31810@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31811 int ret = 0;
31812
31813 /* kprobe_running() needs smp_processor_id() */
31814- if (kprobes_built_in() && !user_mode_vm(regs)) {
31815+ if (kprobes_built_in() && !user_mode(regs)) {
31816 preempt_disable();
31817 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31818 ret = 1;
31819@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31820 return !instr_lo || (instr_lo>>1) == 1;
31821 case 0x00:
31822 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31823- if (probe_kernel_address(instr, opcode))
31824+ if (user_mode(regs)) {
31825+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31826+ return 0;
31827+ } else if (probe_kernel_address(instr, opcode))
31828 return 0;
31829
31830 *prefetch = (instr_lo == 0xF) &&
31831@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31832 while (instr < max_instr) {
31833 unsigned char opcode;
31834
31835- if (probe_kernel_address(instr, opcode))
31836+ if (user_mode(regs)) {
31837+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31838+ break;
31839+ } else if (probe_kernel_address(instr, opcode))
31840 break;
31841
31842 instr++;
31843@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31844 force_sig_info(si_signo, &info, tsk);
31845 }
31846
31847+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31848+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31849+#endif
31850+
31851+#ifdef CONFIG_PAX_EMUTRAMP
31852+static int pax_handle_fetch_fault(struct pt_regs *regs);
31853+#endif
31854+
31855+#ifdef CONFIG_PAX_PAGEEXEC
31856+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31857+{
31858+ pgd_t *pgd;
31859+ pud_t *pud;
31860+ pmd_t *pmd;
31861+
31862+ pgd = pgd_offset(mm, address);
31863+ if (!pgd_present(*pgd))
31864+ return NULL;
31865+ pud = pud_offset(pgd, address);
31866+ if (!pud_present(*pud))
31867+ return NULL;
31868+ pmd = pmd_offset(pud, address);
31869+ if (!pmd_present(*pmd))
31870+ return NULL;
31871+ return pmd;
31872+}
31873+#endif
31874+
31875 DEFINE_SPINLOCK(pgd_lock);
31876 LIST_HEAD(pgd_list);
31877
31878@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31879 for (address = VMALLOC_START & PMD_MASK;
31880 address >= TASK_SIZE && address < FIXADDR_TOP;
31881 address += PMD_SIZE) {
31882+
31883+#ifdef CONFIG_PAX_PER_CPU_PGD
31884+ unsigned long cpu;
31885+#else
31886 struct page *page;
31887+#endif
31888
31889 spin_lock(&pgd_lock);
31890+
31891+#ifdef CONFIG_PAX_PER_CPU_PGD
31892+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31893+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31894+ pmd_t *ret;
31895+
31896+ ret = vmalloc_sync_one(pgd, address);
31897+ if (!ret)
31898+ break;
31899+ pgd = get_cpu_pgd(cpu, kernel);
31900+#else
31901 list_for_each_entry(page, &pgd_list, lru) {
31902+ pgd_t *pgd;
31903 spinlock_t *pgt_lock;
31904 pmd_t *ret;
31905
31906@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31907 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31908
31909 spin_lock(pgt_lock);
31910- ret = vmalloc_sync_one(page_address(page), address);
31911+ pgd = page_address(page);
31912+#endif
31913+
31914+ ret = vmalloc_sync_one(pgd, address);
31915+
31916+#ifndef CONFIG_PAX_PER_CPU_PGD
31917 spin_unlock(pgt_lock);
31918+#endif
31919
31920 if (!ret)
31921 break;
31922@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31923 * an interrupt in the middle of a task switch..
31924 */
31925 pgd_paddr = read_cr3();
31926+
31927+#ifdef CONFIG_PAX_PER_CPU_PGD
31928+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31929+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31930+#endif
31931+
31932 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31933 if (!pmd_k)
31934 return -1;
31935@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31936 * happen within a race in page table update. In the later
31937 * case just flush:
31938 */
31939- pgd = pgd_offset(current->active_mm, address);
31940+
31941 pgd_ref = pgd_offset_k(address);
31942 if (pgd_none(*pgd_ref))
31943 return -1;
31944
31945+#ifdef CONFIG_PAX_PER_CPU_PGD
31946+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31947+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31948+ if (pgd_none(*pgd)) {
31949+ set_pgd(pgd, *pgd_ref);
31950+ arch_flush_lazy_mmu_mode();
31951+ } else {
31952+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31953+ }
31954+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31955+#else
31956+ pgd = pgd_offset(current->active_mm, address);
31957+#endif
31958+
31959 if (pgd_none(*pgd)) {
31960 set_pgd(pgd, *pgd_ref);
31961 arch_flush_lazy_mmu_mode();
31962@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31963 static int is_errata100(struct pt_regs *regs, unsigned long address)
31964 {
31965 #ifdef CONFIG_X86_64
31966- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31967+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31968 return 1;
31969 #endif
31970 return 0;
31971@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31972 }
31973
31974 static const char nx_warning[] = KERN_CRIT
31975-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31976+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31977 static const char smep_warning[] = KERN_CRIT
31978-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31979+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31980
31981 static void
31982 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31983@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31984 if (!oops_may_print())
31985 return;
31986
31987- if (error_code & PF_INSTR) {
31988+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31989 unsigned int level;
31990 pgd_t *pgd;
31991 pte_t *pte;
31992@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31993 pte = lookup_address_in_pgd(pgd, address, &level);
31994
31995 if (pte && pte_present(*pte) && !pte_exec(*pte))
31996- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31997+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31998 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31999 (pgd_flags(*pgd) & _PAGE_USER) &&
32000 (__read_cr4() & X86_CR4_SMEP))
32001- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
32002+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32003 }
32004
32005+#ifdef CONFIG_PAX_KERNEXEC
32006+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32007+ if (current->signal->curr_ip)
32008+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32009+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32010+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32011+ else
32012+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32013+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32014+ }
32015+#endif
32016+
32017 printk(KERN_ALERT "BUG: unable to handle kernel ");
32018 if (address < PAGE_SIZE)
32019 printk(KERN_CONT "NULL pointer dereference");
32020@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32021 return;
32022 }
32023 #endif
32024+
32025+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32026+ if (pax_is_fetch_fault(regs, error_code, address)) {
32027+
32028+#ifdef CONFIG_PAX_EMUTRAMP
32029+ switch (pax_handle_fetch_fault(regs)) {
32030+ case 2:
32031+ return;
32032+ }
32033+#endif
32034+
32035+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32036+ do_group_exit(SIGKILL);
32037+ }
32038+#endif
32039+
32040 /* Kernel addresses are always protection faults: */
32041 if (address >= TASK_SIZE)
32042 error_code |= PF_PROT;
32043@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32044 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32045 printk(KERN_ERR
32046 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32047- tsk->comm, tsk->pid, address);
32048+ tsk->comm, task_pid_nr(tsk), address);
32049 code = BUS_MCEERR_AR;
32050 }
32051 #endif
32052@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32053 return 1;
32054 }
32055
32056+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32057+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32058+{
32059+ pte_t *pte;
32060+ pmd_t *pmd;
32061+ spinlock_t *ptl;
32062+ unsigned char pte_mask;
32063+
32064+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32065+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32066+ return 0;
32067+
32068+ /* PaX: it's our fault, let's handle it if we can */
32069+
32070+ /* PaX: take a look at read faults before acquiring any locks */
32071+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32072+ /* instruction fetch attempt from a protected page in user mode */
32073+ up_read(&mm->mmap_sem);
32074+
32075+#ifdef CONFIG_PAX_EMUTRAMP
32076+ switch (pax_handle_fetch_fault(regs)) {
32077+ case 2:
32078+ return 1;
32079+ }
32080+#endif
32081+
32082+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32083+ do_group_exit(SIGKILL);
32084+ }
32085+
32086+ pmd = pax_get_pmd(mm, address);
32087+ if (unlikely(!pmd))
32088+ return 0;
32089+
32090+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32091+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32092+ pte_unmap_unlock(pte, ptl);
32093+ return 0;
32094+ }
32095+
32096+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32097+ /* write attempt to a protected page in user mode */
32098+ pte_unmap_unlock(pte, ptl);
32099+ return 0;
32100+ }
32101+
32102+#ifdef CONFIG_SMP
32103+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32104+#else
32105+ if (likely(address > get_limit(regs->cs)))
32106+#endif
32107+ {
32108+ set_pte(pte, pte_mkread(*pte));
32109+ __flush_tlb_one(address);
32110+ pte_unmap_unlock(pte, ptl);
32111+ up_read(&mm->mmap_sem);
32112+ return 1;
32113+ }
32114+
32115+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32116+
32117+ /*
32118+ * PaX: fill DTLB with user rights and retry
32119+ */
32120+ __asm__ __volatile__ (
32121+ "orb %2,(%1)\n"
32122+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32123+/*
32124+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32125+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32126+ * page fault when examined during a TLB load attempt. this is true not only
32127+ * for PTEs holding a non-present entry but also present entries that will
32128+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32129+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32130+ * for our target pages since their PTEs are simply not in the TLBs at all.
32131+
32132+ * the best thing in omitting it is that we gain around 15-20% speed in the
32133+ * fast path of the page fault handler and can get rid of tracing since we
32134+ * can no longer flush unintended entries.
32135+ */
32136+ "invlpg (%0)\n"
32137+#endif
32138+ __copyuser_seg"testb $0,(%0)\n"
32139+ "xorb %3,(%1)\n"
32140+ :
32141+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32142+ : "memory", "cc");
32143+ pte_unmap_unlock(pte, ptl);
32144+ up_read(&mm->mmap_sem);
32145+ return 1;
32146+}
32147+#endif
32148+
32149 /*
32150 * Handle a spurious fault caused by a stale TLB entry.
32151 *
32152@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32153 static inline int
32154 access_error(unsigned long error_code, struct vm_area_struct *vma)
32155 {
32156+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32157+ return 1;
32158+
32159 if (error_code & PF_WRITE) {
32160 /* write, present and write, not present: */
32161 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32162@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32163 if (error_code & PF_USER)
32164 return false;
32165
32166- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32167+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32168 return false;
32169
32170 return true;
32171@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32172 tsk = current;
32173 mm = tsk->mm;
32174
32175+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32176+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32177+ if (!search_exception_tables(regs->ip)) {
32178+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32179+ bad_area_nosemaphore(regs, error_code, address);
32180+ return;
32181+ }
32182+ if (address < pax_user_shadow_base) {
32183+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32184+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32185+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32186+ } else
32187+ address -= pax_user_shadow_base;
32188+ }
32189+#endif
32190+
32191 /*
32192 * Detect and handle instructions that would cause a page fault for
32193 * both a tracked kernel page and a userspace page.
32194@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32195 * User-mode registers count as a user access even for any
32196 * potential system fault or CPU buglet:
32197 */
32198- if (user_mode_vm(regs)) {
32199+ if (user_mode(regs)) {
32200 local_irq_enable();
32201 error_code |= PF_USER;
32202 flags |= FAULT_FLAG_USER;
32203@@ -1187,6 +1411,11 @@ retry:
32204 might_sleep();
32205 }
32206
32207+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32208+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32209+ return;
32210+#endif
32211+
32212 vma = find_vma(mm, address);
32213 if (unlikely(!vma)) {
32214 bad_area(regs, error_code, address);
32215@@ -1198,18 +1427,24 @@ retry:
32216 bad_area(regs, error_code, address);
32217 return;
32218 }
32219- if (error_code & PF_USER) {
32220- /*
32221- * Accessing the stack below %sp is always a bug.
32222- * The large cushion allows instructions like enter
32223- * and pusha to work. ("enter $65535, $31" pushes
32224- * 32 pointers and then decrements %sp by 65535.)
32225- */
32226- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32227- bad_area(regs, error_code, address);
32228- return;
32229- }
32230+ /*
32231+ * Accessing the stack below %sp is always a bug.
32232+ * The large cushion allows instructions like enter
32233+ * and pusha to work. ("enter $65535, $31" pushes
32234+ * 32 pointers and then decrements %sp by 65535.)
32235+ */
32236+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32237+ bad_area(regs, error_code, address);
32238+ return;
32239 }
32240+
32241+#ifdef CONFIG_PAX_SEGMEXEC
32242+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32243+ bad_area(regs, error_code, address);
32244+ return;
32245+ }
32246+#endif
32247+
32248 if (unlikely(expand_stack(vma, address))) {
32249 bad_area(regs, error_code, address);
32250 return;
32251@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32252 }
32253 NOKPROBE_SYMBOL(trace_do_page_fault);
32254 #endif /* CONFIG_TRACING */
32255+
32256+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32257+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32258+{
32259+ struct mm_struct *mm = current->mm;
32260+ unsigned long ip = regs->ip;
32261+
32262+ if (v8086_mode(regs))
32263+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32264+
32265+#ifdef CONFIG_PAX_PAGEEXEC
32266+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32267+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32268+ return true;
32269+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32270+ return true;
32271+ return false;
32272+ }
32273+#endif
32274+
32275+#ifdef CONFIG_PAX_SEGMEXEC
32276+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32277+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32278+ return true;
32279+ return false;
32280+ }
32281+#endif
32282+
32283+ return false;
32284+}
32285+#endif
32286+
32287+#ifdef CONFIG_PAX_EMUTRAMP
32288+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32289+{
32290+ int err;
32291+
32292+ do { /* PaX: libffi trampoline emulation */
32293+ unsigned char mov, jmp;
32294+ unsigned int addr1, addr2;
32295+
32296+#ifdef CONFIG_X86_64
32297+ if ((regs->ip + 9) >> 32)
32298+ break;
32299+#endif
32300+
32301+ err = get_user(mov, (unsigned char __user *)regs->ip);
32302+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32303+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32304+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32305+
32306+ if (err)
32307+ break;
32308+
32309+ if (mov == 0xB8 && jmp == 0xE9) {
32310+ regs->ax = addr1;
32311+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32312+ return 2;
32313+ }
32314+ } while (0);
32315+
32316+ do { /* PaX: gcc trampoline emulation #1 */
32317+ unsigned char mov1, mov2;
32318+ unsigned short jmp;
32319+ unsigned int addr1, addr2;
32320+
32321+#ifdef CONFIG_X86_64
32322+ if ((regs->ip + 11) >> 32)
32323+ break;
32324+#endif
32325+
32326+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32327+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32328+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32329+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32330+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32331+
32332+ if (err)
32333+ break;
32334+
32335+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32336+ regs->cx = addr1;
32337+ regs->ax = addr2;
32338+ regs->ip = addr2;
32339+ return 2;
32340+ }
32341+ } while (0);
32342+
32343+ do { /* PaX: gcc trampoline emulation #2 */
32344+ unsigned char mov, jmp;
32345+ unsigned int addr1, addr2;
32346+
32347+#ifdef CONFIG_X86_64
32348+ if ((regs->ip + 9) >> 32)
32349+ break;
32350+#endif
32351+
32352+ err = get_user(mov, (unsigned char __user *)regs->ip);
32353+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32354+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32355+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32356+
32357+ if (err)
32358+ break;
32359+
32360+ if (mov == 0xB9 && jmp == 0xE9) {
32361+ regs->cx = addr1;
32362+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32363+ return 2;
32364+ }
32365+ } while (0);
32366+
32367+ return 1; /* PaX in action */
32368+}
32369+
32370+#ifdef CONFIG_X86_64
32371+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32372+{
32373+ int err;
32374+
32375+ do { /* PaX: libffi trampoline emulation */
32376+ unsigned short mov1, mov2, jmp1;
32377+ unsigned char stcclc, jmp2;
32378+ unsigned long addr1, addr2;
32379+
32380+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32381+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32382+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32383+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32384+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32385+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32386+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32387+
32388+ if (err)
32389+ break;
32390+
32391+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32392+ regs->r11 = addr1;
32393+ regs->r10 = addr2;
32394+ if (stcclc == 0xF8)
32395+ regs->flags &= ~X86_EFLAGS_CF;
32396+ else
32397+ regs->flags |= X86_EFLAGS_CF;
32398+ regs->ip = addr1;
32399+ return 2;
32400+ }
32401+ } while (0);
32402+
32403+ do { /* PaX: gcc trampoline emulation #1 */
32404+ unsigned short mov1, mov2, jmp1;
32405+ unsigned char jmp2;
32406+ unsigned int addr1;
32407+ unsigned long addr2;
32408+
32409+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32410+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32411+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32412+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32413+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32414+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32415+
32416+ if (err)
32417+ break;
32418+
32419+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32420+ regs->r11 = addr1;
32421+ regs->r10 = addr2;
32422+ regs->ip = addr1;
32423+ return 2;
32424+ }
32425+ } while (0);
32426+
32427+ do { /* PaX: gcc trampoline emulation #2 */
32428+ unsigned short mov1, mov2, jmp1;
32429+ unsigned char jmp2;
32430+ unsigned long addr1, addr2;
32431+
32432+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32433+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32434+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32435+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32436+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32437+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32438+
32439+ if (err)
32440+ break;
32441+
32442+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32443+ regs->r11 = addr1;
32444+ regs->r10 = addr2;
32445+ regs->ip = addr1;
32446+ return 2;
32447+ }
32448+ } while (0);
32449+
32450+ return 1; /* PaX in action */
32451+}
32452+#endif
32453+
32454+/*
32455+ * PaX: decide what to do with offenders (regs->ip = fault address)
32456+ *
32457+ * returns 1 when task should be killed
32458+ * 2 when gcc trampoline was detected
32459+ */
32460+static int pax_handle_fetch_fault(struct pt_regs *regs)
32461+{
32462+ if (v8086_mode(regs))
32463+ return 1;
32464+
32465+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32466+ return 1;
32467+
32468+#ifdef CONFIG_X86_32
32469+ return pax_handle_fetch_fault_32(regs);
32470+#else
32471+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32472+ return pax_handle_fetch_fault_32(regs);
32473+ else
32474+ return pax_handle_fetch_fault_64(regs);
32475+#endif
32476+}
32477+#endif
32478+
32479+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32480+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32481+{
32482+ long i;
32483+
32484+ printk(KERN_ERR "PAX: bytes at PC: ");
32485+ for (i = 0; i < 20; i++) {
32486+ unsigned char c;
32487+ if (get_user(c, (unsigned char __force_user *)pc+i))
32488+ printk(KERN_CONT "?? ");
32489+ else
32490+ printk(KERN_CONT "%02x ", c);
32491+ }
32492+ printk("\n");
32493+
32494+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32495+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32496+ unsigned long c;
32497+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32498+#ifdef CONFIG_X86_32
32499+ printk(KERN_CONT "???????? ");
32500+#else
32501+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32502+ printk(KERN_CONT "???????? ???????? ");
32503+ else
32504+ printk(KERN_CONT "???????????????? ");
32505+#endif
32506+ } else {
32507+#ifdef CONFIG_X86_64
32508+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32509+ printk(KERN_CONT "%08x ", (unsigned int)c);
32510+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32511+ } else
32512+#endif
32513+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32514+ }
32515+ }
32516+ printk("\n");
32517+}
32518+#endif
32519+
32520+/**
32521+ * probe_kernel_write(): safely attempt to write to a location
32522+ * @dst: address to write to
32523+ * @src: pointer to the data that shall be written
32524+ * @size: size of the data chunk
32525+ *
32526+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32527+ * happens, handle that and return -EFAULT.
32528+ */
32529+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32530+{
32531+ long ret;
32532+ mm_segment_t old_fs = get_fs();
32533+
32534+ set_fs(KERNEL_DS);
32535+ pagefault_disable();
32536+ pax_open_kernel();
32537+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32538+ pax_close_kernel();
32539+ pagefault_enable();
32540+ set_fs(old_fs);
32541+
32542+ return ret ? -EFAULT : 0;
32543+}
32544diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32545index 81bf3d2..7ef25c2 100644
32546--- a/arch/x86/mm/gup.c
32547+++ b/arch/x86/mm/gup.c
32548@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32549 addr = start;
32550 len = (unsigned long) nr_pages << PAGE_SHIFT;
32551 end = start + len;
32552- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32553+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32554 (void __user *)start, len)))
32555 return 0;
32556
32557@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32558 goto slow_irqon;
32559 #endif
32560
32561+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32562+ (void __user *)start, len)))
32563+ return 0;
32564+
32565 /*
32566 * XXX: batch / limit 'nr', to avoid large irq off latency
32567 * needs some instrumenting to determine the common sizes used by
32568diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32569index 4500142..53a363c 100644
32570--- a/arch/x86/mm/highmem_32.c
32571+++ b/arch/x86/mm/highmem_32.c
32572@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32573 idx = type + KM_TYPE_NR*smp_processor_id();
32574 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32575 BUG_ON(!pte_none(*(kmap_pte-idx)));
32576+
32577+ pax_open_kernel();
32578 set_pte(kmap_pte-idx, mk_pte(page, prot));
32579+ pax_close_kernel();
32580+
32581 arch_flush_lazy_mmu_mode();
32582
32583 return (void *)vaddr;
32584diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32585index 42982b2..7168fc3 100644
32586--- a/arch/x86/mm/hugetlbpage.c
32587+++ b/arch/x86/mm/hugetlbpage.c
32588@@ -74,23 +74,24 @@ int pud_huge(pud_t pud)
32589 #ifdef CONFIG_HUGETLB_PAGE
32590 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32591 unsigned long addr, unsigned long len,
32592- unsigned long pgoff, unsigned long flags)
32593+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32594 {
32595 struct hstate *h = hstate_file(file);
32596 struct vm_unmapped_area_info info;
32597-
32598+
32599 info.flags = 0;
32600 info.length = len;
32601 info.low_limit = current->mm->mmap_legacy_base;
32602 info.high_limit = TASK_SIZE;
32603 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32604 info.align_offset = 0;
32605+ info.threadstack_offset = offset;
32606 return vm_unmapped_area(&info);
32607 }
32608
32609 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32610 unsigned long addr0, unsigned long len,
32611- unsigned long pgoff, unsigned long flags)
32612+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32613 {
32614 struct hstate *h = hstate_file(file);
32615 struct vm_unmapped_area_info info;
32616@@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32617 info.high_limit = current->mm->mmap_base;
32618 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32619 info.align_offset = 0;
32620+ info.threadstack_offset = offset;
32621 addr = vm_unmapped_area(&info);
32622
32623 /*
32624@@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32625 VM_BUG_ON(addr != -ENOMEM);
32626 info.flags = 0;
32627 info.low_limit = TASK_UNMAPPED_BASE;
32628+
32629+#ifdef CONFIG_PAX_RANDMMAP
32630+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32631+ info.low_limit += current->mm->delta_mmap;
32632+#endif
32633+
32634 info.high_limit = TASK_SIZE;
32635 addr = vm_unmapped_area(&info);
32636 }
32637@@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32638 struct hstate *h = hstate_file(file);
32639 struct mm_struct *mm = current->mm;
32640 struct vm_area_struct *vma;
32641+ unsigned long pax_task_size = TASK_SIZE;
32642+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32643
32644 if (len & ~huge_page_mask(h))
32645 return -EINVAL;
32646- if (len > TASK_SIZE)
32647+
32648+#ifdef CONFIG_PAX_SEGMEXEC
32649+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32650+ pax_task_size = SEGMEXEC_TASK_SIZE;
32651+#endif
32652+
32653+ pax_task_size -= PAGE_SIZE;
32654+
32655+ if (len > pax_task_size)
32656 return -ENOMEM;
32657
32658 if (flags & MAP_FIXED) {
32659@@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32660 return addr;
32661 }
32662
32663+#ifdef CONFIG_PAX_RANDMMAP
32664+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32665+#endif
32666+
32667 if (addr) {
32668 addr = ALIGN(addr, huge_page_size(h));
32669 vma = find_vma(mm, addr);
32670- if (TASK_SIZE - len >= addr &&
32671- (!vma || addr + len <= vma->vm_start))
32672+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32673 return addr;
32674 }
32675 if (mm->get_unmapped_area == arch_get_unmapped_area)
32676 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32677- pgoff, flags);
32678+ pgoff, flags, offset);
32679 else
32680 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32681- pgoff, flags);
32682+ pgoff, flags, offset);
32683 }
32684 #endif /* CONFIG_HUGETLB_PAGE */
32685
32686diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32687index a110efc..a31a18f 100644
32688--- a/arch/x86/mm/init.c
32689+++ b/arch/x86/mm/init.c
32690@@ -4,6 +4,7 @@
32691 #include <linux/swap.h>
32692 #include <linux/memblock.h>
32693 #include <linux/bootmem.h> /* for max_low_pfn */
32694+#include <linux/tboot.h>
32695
32696 #include <asm/cacheflush.h>
32697 #include <asm/e820.h>
32698@@ -17,6 +18,8 @@
32699 #include <asm/proto.h>
32700 #include <asm/dma.h> /* for MAX_DMA_PFN */
32701 #include <asm/microcode.h>
32702+#include <asm/desc.h>
32703+#include <asm/bios_ebda.h>
32704
32705 /*
32706 * We need to define the tracepoints somewhere, and tlb.c
32707@@ -620,7 +623,18 @@ void __init init_mem_mapping(void)
32708 early_ioremap_page_table_range_init();
32709 #endif
32710
32711+#ifdef CONFIG_PAX_PER_CPU_PGD
32712+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32713+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32714+ KERNEL_PGD_PTRS);
32715+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32716+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32717+ KERNEL_PGD_PTRS);
32718+ load_cr3(get_cpu_pgd(0, kernel));
32719+#else
32720 load_cr3(swapper_pg_dir);
32721+#endif
32722+
32723 __flush_tlb_all();
32724
32725 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32726@@ -636,10 +650,40 @@ void __init init_mem_mapping(void)
32727 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32728 * mmio resources as well as potential bios/acpi data regions.
32729 */
32730+
32731+#ifdef CONFIG_GRKERNSEC_KMEM
32732+static unsigned int ebda_start __read_only;
32733+static unsigned int ebda_end __read_only;
32734+#endif
32735+
32736 int devmem_is_allowed(unsigned long pagenr)
32737 {
32738- if (pagenr < 256)
32739+#ifdef CONFIG_GRKERNSEC_KMEM
32740+ /* allow BDA */
32741+ if (!pagenr)
32742 return 1;
32743+ /* allow EBDA */
32744+ if (pagenr >= ebda_start && pagenr < ebda_end)
32745+ return 1;
32746+ /* if tboot is in use, allow access to its hardcoded serial log range */
32747+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32748+ return 1;
32749+#else
32750+ if (!pagenr)
32751+ return 1;
32752+#ifdef CONFIG_VM86
32753+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32754+ return 1;
32755+#endif
32756+#endif
32757+
32758+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32759+ return 1;
32760+#ifdef CONFIG_GRKERNSEC_KMEM
32761+ /* throw out everything else below 1MB */
32762+ if (pagenr <= 256)
32763+ return 0;
32764+#endif
32765 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32766 return 0;
32767 if (!page_is_ram(pagenr))
32768@@ -685,8 +729,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32769 #endif
32770 }
32771
32772+#ifdef CONFIG_GRKERNSEC_KMEM
32773+static inline void gr_init_ebda(void)
32774+{
32775+ unsigned int ebda_addr;
32776+ unsigned int ebda_size = 0;
32777+
32778+ ebda_addr = get_bios_ebda();
32779+ if (ebda_addr) {
32780+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32781+ ebda_size <<= 10;
32782+ }
32783+ if (ebda_addr && ebda_size) {
32784+ ebda_start = ebda_addr >> PAGE_SHIFT;
32785+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32786+ } else {
32787+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32788+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32789+ }
32790+}
32791+#else
32792+static inline void gr_init_ebda(void) { }
32793+#endif
32794+
32795 void free_initmem(void)
32796 {
32797+#ifdef CONFIG_PAX_KERNEXEC
32798+#ifdef CONFIG_X86_32
32799+ /* PaX: limit KERNEL_CS to actual size */
32800+ unsigned long addr, limit;
32801+ struct desc_struct d;
32802+ int cpu;
32803+#else
32804+ pgd_t *pgd;
32805+ pud_t *pud;
32806+ pmd_t *pmd;
32807+ unsigned long addr, end;
32808+#endif
32809+#endif
32810+
32811+ gr_init_ebda();
32812+
32813+#ifdef CONFIG_PAX_KERNEXEC
32814+#ifdef CONFIG_X86_32
32815+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32816+ limit = (limit - 1UL) >> PAGE_SHIFT;
32817+
32818+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32819+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32820+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32821+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32822+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32823+ }
32824+
32825+ /* PaX: make KERNEL_CS read-only */
32826+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32827+ if (!paravirt_enabled())
32828+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32829+/*
32830+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32831+ pgd = pgd_offset_k(addr);
32832+ pud = pud_offset(pgd, addr);
32833+ pmd = pmd_offset(pud, addr);
32834+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32835+ }
32836+*/
32837+#ifdef CONFIG_X86_PAE
32838+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32839+/*
32840+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32841+ pgd = pgd_offset_k(addr);
32842+ pud = pud_offset(pgd, addr);
32843+ pmd = pmd_offset(pud, addr);
32844+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32845+ }
32846+*/
32847+#endif
32848+
32849+#ifdef CONFIG_MODULES
32850+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32851+#endif
32852+
32853+#else
32854+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32855+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32856+ pgd = pgd_offset_k(addr);
32857+ pud = pud_offset(pgd, addr);
32858+ pmd = pmd_offset(pud, addr);
32859+ if (!pmd_present(*pmd))
32860+ continue;
32861+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32862+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32863+ else
32864+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32865+ }
32866+
32867+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32868+ end = addr + KERNEL_IMAGE_SIZE;
32869+ for (; addr < end; addr += PMD_SIZE) {
32870+ pgd = pgd_offset_k(addr);
32871+ pud = pud_offset(pgd, addr);
32872+ pmd = pmd_offset(pud, addr);
32873+ if (!pmd_present(*pmd))
32874+ continue;
32875+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32876+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32877+ }
32878+#endif
32879+
32880+ flush_tlb_all();
32881+#endif
32882+
32883 free_init_pages("unused kernel",
32884 (unsigned long)(&__init_begin),
32885 (unsigned long)(&__init_end));
32886diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32887index c8140e1..59257fc 100644
32888--- a/arch/x86/mm/init_32.c
32889+++ b/arch/x86/mm/init_32.c
32890@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32891 bool __read_mostly __vmalloc_start_set = false;
32892
32893 /*
32894- * Creates a middle page table and puts a pointer to it in the
32895- * given global directory entry. This only returns the gd entry
32896- * in non-PAE compilation mode, since the middle layer is folded.
32897- */
32898-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32899-{
32900- pud_t *pud;
32901- pmd_t *pmd_table;
32902-
32903-#ifdef CONFIG_X86_PAE
32904- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32905- pmd_table = (pmd_t *)alloc_low_page();
32906- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32907- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32908- pud = pud_offset(pgd, 0);
32909- BUG_ON(pmd_table != pmd_offset(pud, 0));
32910-
32911- return pmd_table;
32912- }
32913-#endif
32914- pud = pud_offset(pgd, 0);
32915- pmd_table = pmd_offset(pud, 0);
32916-
32917- return pmd_table;
32918-}
32919-
32920-/*
32921 * Create a page table and place a pointer to it in a middle page
32922 * directory entry:
32923 */
32924@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32925 pte_t *page_table = (pte_t *)alloc_low_page();
32926
32927 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32928+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32929+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32930+#else
32931 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32932+#endif
32933 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32934 }
32935
32936 return pte_offset_kernel(pmd, 0);
32937 }
32938
32939+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32940+{
32941+ pud_t *pud;
32942+ pmd_t *pmd_table;
32943+
32944+ pud = pud_offset(pgd, 0);
32945+ pmd_table = pmd_offset(pud, 0);
32946+
32947+ return pmd_table;
32948+}
32949+
32950 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32951 {
32952 int pgd_idx = pgd_index(vaddr);
32953@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32954 int pgd_idx, pmd_idx;
32955 unsigned long vaddr;
32956 pgd_t *pgd;
32957+ pud_t *pud;
32958 pmd_t *pmd;
32959 pte_t *pte = NULL;
32960 unsigned long count = page_table_range_init_count(start, end);
32961@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32962 pgd = pgd_base + pgd_idx;
32963
32964 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32965- pmd = one_md_table_init(pgd);
32966- pmd = pmd + pmd_index(vaddr);
32967+ pud = pud_offset(pgd, vaddr);
32968+ pmd = pmd_offset(pud, vaddr);
32969+
32970+#ifdef CONFIG_X86_PAE
32971+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32972+#endif
32973+
32974 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32975 pmd++, pmd_idx++) {
32976 pte = page_table_kmap_check(one_page_table_init(pmd),
32977@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32978 }
32979 }
32980
32981-static inline int is_kernel_text(unsigned long addr)
32982+static inline int is_kernel_text(unsigned long start, unsigned long end)
32983 {
32984- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32985- return 1;
32986- return 0;
32987+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32988+ end <= ktla_ktva((unsigned long)_stext)) &&
32989+ (start >= ktla_ktva((unsigned long)_einittext) ||
32990+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32991+
32992+#ifdef CONFIG_ACPI_SLEEP
32993+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32994+#endif
32995+
32996+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32997+ return 0;
32998+ return 1;
32999 }
33000
33001 /*
33002@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33003 unsigned long last_map_addr = end;
33004 unsigned long start_pfn, end_pfn;
33005 pgd_t *pgd_base = swapper_pg_dir;
33006- int pgd_idx, pmd_idx, pte_ofs;
33007+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33008 unsigned long pfn;
33009 pgd_t *pgd;
33010+ pud_t *pud;
33011 pmd_t *pmd;
33012 pte_t *pte;
33013 unsigned pages_2m, pages_4k;
33014@@ -291,8 +295,13 @@ repeat:
33015 pfn = start_pfn;
33016 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33017 pgd = pgd_base + pgd_idx;
33018- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33019- pmd = one_md_table_init(pgd);
33020+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33021+ pud = pud_offset(pgd, 0);
33022+ pmd = pmd_offset(pud, 0);
33023+
33024+#ifdef CONFIG_X86_PAE
33025+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33026+#endif
33027
33028 if (pfn >= end_pfn)
33029 continue;
33030@@ -304,14 +313,13 @@ repeat:
33031 #endif
33032 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33033 pmd++, pmd_idx++) {
33034- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33035+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33036
33037 /*
33038 * Map with big pages if possible, otherwise
33039 * create normal page tables:
33040 */
33041 if (use_pse) {
33042- unsigned int addr2;
33043 pgprot_t prot = PAGE_KERNEL_LARGE;
33044 /*
33045 * first pass will use the same initial
33046@@ -322,11 +330,7 @@ repeat:
33047 _PAGE_PSE);
33048
33049 pfn &= PMD_MASK >> PAGE_SHIFT;
33050- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33051- PAGE_OFFSET + PAGE_SIZE-1;
33052-
33053- if (is_kernel_text(addr) ||
33054- is_kernel_text(addr2))
33055+ if (is_kernel_text(address, address + PMD_SIZE))
33056 prot = PAGE_KERNEL_LARGE_EXEC;
33057
33058 pages_2m++;
33059@@ -343,7 +347,7 @@ repeat:
33060 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33061 pte += pte_ofs;
33062 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33063- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33064+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33065 pgprot_t prot = PAGE_KERNEL;
33066 /*
33067 * first pass will use the same initial
33068@@ -351,7 +355,7 @@ repeat:
33069 */
33070 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33071
33072- if (is_kernel_text(addr))
33073+ if (is_kernel_text(address, address + PAGE_SIZE))
33074 prot = PAGE_KERNEL_EXEC;
33075
33076 pages_4k++;
33077@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33078
33079 pud = pud_offset(pgd, va);
33080 pmd = pmd_offset(pud, va);
33081- if (!pmd_present(*pmd))
33082+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33083 break;
33084
33085 /* should not be large page here */
33086@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33087
33088 static void __init pagetable_init(void)
33089 {
33090- pgd_t *pgd_base = swapper_pg_dir;
33091-
33092- permanent_kmaps_init(pgd_base);
33093+ permanent_kmaps_init(swapper_pg_dir);
33094 }
33095
33096-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33097+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33098 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33099
33100 /* user-defined highmem size */
33101@@ -787,10 +789,10 @@ void __init mem_init(void)
33102 ((unsigned long)&__init_end -
33103 (unsigned long)&__init_begin) >> 10,
33104
33105- (unsigned long)&_etext, (unsigned long)&_edata,
33106- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33107+ (unsigned long)&_sdata, (unsigned long)&_edata,
33108+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33109
33110- (unsigned long)&_text, (unsigned long)&_etext,
33111+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33112 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33113
33114 /*
33115@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33116 if (!kernel_set_to_readonly)
33117 return;
33118
33119+ start = ktla_ktva(start);
33120 pr_debug("Set kernel text: %lx - %lx for read write\n",
33121 start, start+size);
33122
33123@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33124 if (!kernel_set_to_readonly)
33125 return;
33126
33127+ start = ktla_ktva(start);
33128 pr_debug("Set kernel text: %lx - %lx for read only\n",
33129 start, start+size);
33130
33131@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33132 unsigned long start = PFN_ALIGN(_text);
33133 unsigned long size = PFN_ALIGN(_etext) - start;
33134
33135+ start = ktla_ktva(start);
33136 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33137 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33138 size >> 10);
33139diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33140index 30eb05a..ae671ac 100644
33141--- a/arch/x86/mm/init_64.c
33142+++ b/arch/x86/mm/init_64.c
33143@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33144 * around without checking the pgd every time.
33145 */
33146
33147-pteval_t __supported_pte_mask __read_mostly = ~0;
33148+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33149 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33150
33151 int force_personality32;
33152@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33153
33154 for (address = start; address <= end; address += PGDIR_SIZE) {
33155 const pgd_t *pgd_ref = pgd_offset_k(address);
33156+
33157+#ifdef CONFIG_PAX_PER_CPU_PGD
33158+ unsigned long cpu;
33159+#else
33160 struct page *page;
33161+#endif
33162
33163 /*
33164 * When it is called after memory hot remove, pgd_none()
33165@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33166 continue;
33167
33168 spin_lock(&pgd_lock);
33169+
33170+#ifdef CONFIG_PAX_PER_CPU_PGD
33171+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33172+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33173+
33174+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33175+ BUG_ON(pgd_page_vaddr(*pgd)
33176+ != pgd_page_vaddr(*pgd_ref));
33177+
33178+ if (removed) {
33179+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33180+ pgd_clear(pgd);
33181+ } else {
33182+ if (pgd_none(*pgd))
33183+ set_pgd(pgd, *pgd_ref);
33184+ }
33185+
33186+ pgd = pgd_offset_cpu(cpu, kernel, address);
33187+#else
33188 list_for_each_entry(page, &pgd_list, lru) {
33189 pgd_t *pgd;
33190 spinlock_t *pgt_lock;
33191@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33192 /* the pgt_lock only for Xen */
33193 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33194 spin_lock(pgt_lock);
33195+#endif
33196
33197 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33198 BUG_ON(pgd_page_vaddr(*pgd)
33199@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33200 set_pgd(pgd, *pgd_ref);
33201 }
33202
33203+#ifndef CONFIG_PAX_PER_CPU_PGD
33204 spin_unlock(pgt_lock);
33205+#endif
33206+
33207 }
33208 spin_unlock(&pgd_lock);
33209 }
33210@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33211 {
33212 if (pgd_none(*pgd)) {
33213 pud_t *pud = (pud_t *)spp_getpage();
33214- pgd_populate(&init_mm, pgd, pud);
33215+ pgd_populate_kernel(&init_mm, pgd, pud);
33216 if (pud != pud_offset(pgd, 0))
33217 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33218 pud, pud_offset(pgd, 0));
33219@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33220 {
33221 if (pud_none(*pud)) {
33222 pmd_t *pmd = (pmd_t *) spp_getpage();
33223- pud_populate(&init_mm, pud, pmd);
33224+ pud_populate_kernel(&init_mm, pud, pmd);
33225 if (pmd != pmd_offset(pud, 0))
33226 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33227 pmd, pmd_offset(pud, 0));
33228@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33229 pmd = fill_pmd(pud, vaddr);
33230 pte = fill_pte(pmd, vaddr);
33231
33232+ pax_open_kernel();
33233 set_pte(pte, new_pte);
33234+ pax_close_kernel();
33235
33236 /*
33237 * It's enough to flush this one mapping.
33238@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33239 pgd = pgd_offset_k((unsigned long)__va(phys));
33240 if (pgd_none(*pgd)) {
33241 pud = (pud_t *) spp_getpage();
33242- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33243- _PAGE_USER));
33244+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33245 }
33246 pud = pud_offset(pgd, (unsigned long)__va(phys));
33247 if (pud_none(*pud)) {
33248 pmd = (pmd_t *) spp_getpage();
33249- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33250- _PAGE_USER));
33251+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33252 }
33253 pmd = pmd_offset(pud, phys);
33254 BUG_ON(!pmd_none(*pmd));
33255@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33256 prot);
33257
33258 spin_lock(&init_mm.page_table_lock);
33259- pud_populate(&init_mm, pud, pmd);
33260+ pud_populate_kernel(&init_mm, pud, pmd);
33261 spin_unlock(&init_mm.page_table_lock);
33262 }
33263 __flush_tlb_all();
33264@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33265 page_size_mask);
33266
33267 spin_lock(&init_mm.page_table_lock);
33268- pgd_populate(&init_mm, pgd, pud);
33269+ pgd_populate_kernel(&init_mm, pgd, pud);
33270 spin_unlock(&init_mm.page_table_lock);
33271 pgd_changed = true;
33272 }
33273diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33274index 9ca35fc..4b2b7b7 100644
33275--- a/arch/x86/mm/iomap_32.c
33276+++ b/arch/x86/mm/iomap_32.c
33277@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33278 type = kmap_atomic_idx_push();
33279 idx = type + KM_TYPE_NR * smp_processor_id();
33280 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33281+
33282+ pax_open_kernel();
33283 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33284+ pax_close_kernel();
33285+
33286 arch_flush_lazy_mmu_mode();
33287
33288 return (void *)vaddr;
33289diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33290index fdf617c..b9e85bc 100644
33291--- a/arch/x86/mm/ioremap.c
33292+++ b/arch/x86/mm/ioremap.c
33293@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33294 unsigned long i;
33295
33296 for (i = 0; i < nr_pages; ++i)
33297- if (pfn_valid(start_pfn + i) &&
33298- !PageReserved(pfn_to_page(start_pfn + i)))
33299+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33300+ !PageReserved(pfn_to_page(start_pfn + i))))
33301 return 1;
33302
33303 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33304@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33305 *
33306 * Caller must ensure there is only one unmapping for the same pointer.
33307 */
33308-void iounmap(volatile void __iomem *addr)
33309+void iounmap(const volatile void __iomem *addr)
33310 {
33311 struct vm_struct *p, *o;
33312
33313@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33314 */
33315 void *xlate_dev_mem_ptr(phys_addr_t phys)
33316 {
33317- void *addr;
33318- unsigned long start = phys & PAGE_MASK;
33319-
33320 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33321- if (page_is_ram(start >> PAGE_SHIFT))
33322+ if (page_is_ram(phys >> PAGE_SHIFT))
33323+#ifdef CONFIG_HIGHMEM
33324+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33325+#endif
33326 return __va(phys);
33327
33328- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33329- if (addr)
33330- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33331-
33332- return addr;
33333+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33334 }
33335
33336 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33337 {
33338 if (page_is_ram(phys >> PAGE_SHIFT))
33339+#ifdef CONFIG_HIGHMEM
33340+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33341+#endif
33342 return;
33343
33344 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33345 return;
33346 }
33347
33348-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33349+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33350
33351 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33352 {
33353@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33354 early_ioremap_setup();
33355
33356 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33357- memset(bm_pte, 0, sizeof(bm_pte));
33358- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33359+ pmd_populate_user(&init_mm, pmd, bm_pte);
33360
33361 /*
33362 * The boot-ioremap range spans multiple pmds, for which
33363diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33364index b4f2e7e..96c9c3e 100644
33365--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33366+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33367@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33368 * memory (e.g. tracked pages)? For now, we need this to avoid
33369 * invoking kmemcheck for PnP BIOS calls.
33370 */
33371- if (regs->flags & X86_VM_MASK)
33372+ if (v8086_mode(regs))
33373 return false;
33374- if (regs->cs != __KERNEL_CS)
33375+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33376 return false;
33377
33378 pte = kmemcheck_pte_lookup(address);
33379diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33380index df4552b..12c129c 100644
33381--- a/arch/x86/mm/mmap.c
33382+++ b/arch/x86/mm/mmap.c
33383@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33384 * Leave an at least ~128 MB hole with possible stack randomization.
33385 */
33386 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33387-#define MAX_GAP (TASK_SIZE/6*5)
33388+#define MAX_GAP (pax_task_size/6*5)
33389
33390 static int mmap_is_legacy(void)
33391 {
33392@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33393 return rnd << PAGE_SHIFT;
33394 }
33395
33396-static unsigned long mmap_base(void)
33397+static unsigned long mmap_base(struct mm_struct *mm)
33398 {
33399 unsigned long gap = rlimit(RLIMIT_STACK);
33400+ unsigned long pax_task_size = TASK_SIZE;
33401+
33402+#ifdef CONFIG_PAX_SEGMEXEC
33403+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33404+ pax_task_size = SEGMEXEC_TASK_SIZE;
33405+#endif
33406
33407 if (gap < MIN_GAP)
33408 gap = MIN_GAP;
33409 else if (gap > MAX_GAP)
33410 gap = MAX_GAP;
33411
33412- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33413+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33414 }
33415
33416 /*
33417 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33418 * does, but not when emulating X86_32
33419 */
33420-static unsigned long mmap_legacy_base(void)
33421+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33422 {
33423- if (mmap_is_ia32())
33424+ if (mmap_is_ia32()) {
33425+
33426+#ifdef CONFIG_PAX_SEGMEXEC
33427+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33428+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33429+ else
33430+#endif
33431+
33432 return TASK_UNMAPPED_BASE;
33433- else
33434+ } else
33435 return TASK_UNMAPPED_BASE + mmap_rnd();
33436 }
33437
33438@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33439 */
33440 void arch_pick_mmap_layout(struct mm_struct *mm)
33441 {
33442- mm->mmap_legacy_base = mmap_legacy_base();
33443- mm->mmap_base = mmap_base();
33444+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33445+ mm->mmap_base = mmap_base(mm);
33446+
33447+#ifdef CONFIG_PAX_RANDMMAP
33448+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33449+ mm->mmap_legacy_base += mm->delta_mmap;
33450+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33451+ }
33452+#endif
33453
33454 if (mmap_is_legacy()) {
33455 mm->mmap_base = mm->mmap_legacy_base;
33456diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33457index 0057a7a..95c7edd 100644
33458--- a/arch/x86/mm/mmio-mod.c
33459+++ b/arch/x86/mm/mmio-mod.c
33460@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33461 break;
33462 default:
33463 {
33464- unsigned char *ip = (unsigned char *)instptr;
33465+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33466 my_trace->opcode = MMIO_UNKNOWN_OP;
33467 my_trace->width = 0;
33468 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33469@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33470 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33471 void __iomem *addr)
33472 {
33473- static atomic_t next_id;
33474+ static atomic_unchecked_t next_id;
33475 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33476 /* These are page-unaligned. */
33477 struct mmiotrace_map map = {
33478@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33479 .private = trace
33480 },
33481 .phys = offset,
33482- .id = atomic_inc_return(&next_id)
33483+ .id = atomic_inc_return_unchecked(&next_id)
33484 };
33485 map.map_id = trace->id;
33486
33487@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33488 ioremap_trace_core(offset, size, addr);
33489 }
33490
33491-static void iounmap_trace_core(volatile void __iomem *addr)
33492+static void iounmap_trace_core(const volatile void __iomem *addr)
33493 {
33494 struct mmiotrace_map map = {
33495 .phys = 0,
33496@@ -328,7 +328,7 @@ not_enabled:
33497 }
33498 }
33499
33500-void mmiotrace_iounmap(volatile void __iomem *addr)
33501+void mmiotrace_iounmap(const volatile void __iomem *addr)
33502 {
33503 might_sleep();
33504 if (is_enabled()) /* recheck and proper locking in *_core() */
33505diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33506index cd4785b..25188b6 100644
33507--- a/arch/x86/mm/numa.c
33508+++ b/arch/x86/mm/numa.c
33509@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33510 }
33511 }
33512
33513-static int __init numa_register_memblks(struct numa_meminfo *mi)
33514+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33515 {
33516 unsigned long uninitialized_var(pfn_align);
33517 int i, nid;
33518diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33519index 536ea2f..f42c293 100644
33520--- a/arch/x86/mm/pageattr.c
33521+++ b/arch/x86/mm/pageattr.c
33522@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33523 */
33524 #ifdef CONFIG_PCI_BIOS
33525 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33526- pgprot_val(forbidden) |= _PAGE_NX;
33527+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33528 #endif
33529
33530 /*
33531@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33532 * Does not cover __inittext since that is gone later on. On
33533 * 64bit we do not enforce !NX on the low mapping
33534 */
33535- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33536- pgprot_val(forbidden) |= _PAGE_NX;
33537+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33538+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33539
33540+#ifdef CONFIG_DEBUG_RODATA
33541 /*
33542 * The .rodata section needs to be read-only. Using the pfn
33543 * catches all aliases.
33544@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33545 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33546 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33547 pgprot_val(forbidden) |= _PAGE_RW;
33548+#endif
33549
33550 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33551 /*
33552@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33553 }
33554 #endif
33555
33556+#ifdef CONFIG_PAX_KERNEXEC
33557+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33558+ pgprot_val(forbidden) |= _PAGE_RW;
33559+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33560+ }
33561+#endif
33562+
33563 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33564
33565 return prot;
33566@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33567 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33568 {
33569 /* change init_mm */
33570+ pax_open_kernel();
33571 set_pte_atomic(kpte, pte);
33572+
33573 #ifdef CONFIG_X86_32
33574 if (!SHARED_KERNEL_PMD) {
33575+
33576+#ifdef CONFIG_PAX_PER_CPU_PGD
33577+ unsigned long cpu;
33578+#else
33579 struct page *page;
33580+#endif
33581
33582+#ifdef CONFIG_PAX_PER_CPU_PGD
33583+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33584+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33585+#else
33586 list_for_each_entry(page, &pgd_list, lru) {
33587- pgd_t *pgd;
33588+ pgd_t *pgd = (pgd_t *)page_address(page);
33589+#endif
33590+
33591 pud_t *pud;
33592 pmd_t *pmd;
33593
33594- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33595+ pgd += pgd_index(address);
33596 pud = pud_offset(pgd, address);
33597 pmd = pmd_offset(pud, address);
33598 set_pte_atomic((pte_t *)pmd, pte);
33599 }
33600 }
33601 #endif
33602+ pax_close_kernel();
33603 }
33604
33605 static int
33606diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33607index 7ac6869..c0ba541 100644
33608--- a/arch/x86/mm/pat.c
33609+++ b/arch/x86/mm/pat.c
33610@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33611 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33612
33613 if (pg_flags == _PGMT_DEFAULT)
33614- return -1;
33615+ return _PAGE_CACHE_MODE_NUM;
33616 else if (pg_flags == _PGMT_WC)
33617 return _PAGE_CACHE_MODE_WC;
33618 else if (pg_flags == _PGMT_UC_MINUS)
33619@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33620
33621 page = pfn_to_page(pfn);
33622 type = get_page_memtype(page);
33623- if (type != -1) {
33624+ if (type != _PAGE_CACHE_MODE_NUM) {
33625 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33626 start, end - 1, type, req_type);
33627 if (new_type)
33628@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33629
33630 if (!entry) {
33631 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33632- current->comm, current->pid, start, end - 1);
33633+ current->comm, task_pid_nr(current), start, end - 1);
33634 return -EINVAL;
33635 }
33636
33637@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33638 page = pfn_to_page(paddr >> PAGE_SHIFT);
33639 rettype = get_page_memtype(page);
33640 /*
33641- * -1 from get_page_memtype() implies RAM page is in its
33642+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33643 * default state and not reserved, and hence of type WB
33644 */
33645- if (rettype == -1)
33646+ if (rettype == _PAGE_CACHE_MODE_NUM)
33647 rettype = _PAGE_CACHE_MODE_WB;
33648
33649 return rettype;
33650@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33651
33652 while (cursor < to) {
33653 if (!devmem_is_allowed(pfn)) {
33654- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33655- current->comm, from, to - 1);
33656+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33657+ current->comm, from, to - 1, cursor);
33658 return 0;
33659 }
33660 cursor += PAGE_SIZE;
33661@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33662 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33663 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33664 "for [mem %#010Lx-%#010Lx]\n",
33665- current->comm, current->pid,
33666+ current->comm, task_pid_nr(current),
33667 cattr_name(pcm),
33668 base, (unsigned long long)(base + size-1));
33669 return -EINVAL;
33670@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33671 pcm = lookup_memtype(paddr);
33672 if (want_pcm != pcm) {
33673 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33674- current->comm, current->pid,
33675+ current->comm, task_pid_nr(current),
33676 cattr_name(want_pcm),
33677 (unsigned long long)paddr,
33678 (unsigned long long)(paddr + size - 1),
33679@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33680 free_memtype(paddr, paddr + size);
33681 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33682 " for [mem %#010Lx-%#010Lx], got %s\n",
33683- current->comm, current->pid,
33684+ current->comm, task_pid_nr(current),
33685 cattr_name(want_pcm),
33686 (unsigned long long)paddr,
33687 (unsigned long long)(paddr + size - 1),
33688diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33689index 6582adc..fcc5d0b 100644
33690--- a/arch/x86/mm/pat_rbtree.c
33691+++ b/arch/x86/mm/pat_rbtree.c
33692@@ -161,7 +161,7 @@ success:
33693
33694 failure:
33695 printk(KERN_INFO "%s:%d conflicting memory types "
33696- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33697+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33698 end, cattr_name(found_type), cattr_name(match->type));
33699 return -EBUSY;
33700 }
33701diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33702index 9f0614d..92ae64a 100644
33703--- a/arch/x86/mm/pf_in.c
33704+++ b/arch/x86/mm/pf_in.c
33705@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33706 int i;
33707 enum reason_type rv = OTHERS;
33708
33709- p = (unsigned char *)ins_addr;
33710+ p = (unsigned char *)ktla_ktva(ins_addr);
33711 p += skip_prefix(p, &prf);
33712 p += get_opcode(p, &opcode);
33713
33714@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33715 struct prefix_bits prf;
33716 int i;
33717
33718- p = (unsigned char *)ins_addr;
33719+ p = (unsigned char *)ktla_ktva(ins_addr);
33720 p += skip_prefix(p, &prf);
33721 p += get_opcode(p, &opcode);
33722
33723@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33724 struct prefix_bits prf;
33725 int i;
33726
33727- p = (unsigned char *)ins_addr;
33728+ p = (unsigned char *)ktla_ktva(ins_addr);
33729 p += skip_prefix(p, &prf);
33730 p += get_opcode(p, &opcode);
33731
33732@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33733 struct prefix_bits prf;
33734 int i;
33735
33736- p = (unsigned char *)ins_addr;
33737+ p = (unsigned char *)ktla_ktva(ins_addr);
33738 p += skip_prefix(p, &prf);
33739 p += get_opcode(p, &opcode);
33740 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33741@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33742 struct prefix_bits prf;
33743 int i;
33744
33745- p = (unsigned char *)ins_addr;
33746+ p = (unsigned char *)ktla_ktva(ins_addr);
33747 p += skip_prefix(p, &prf);
33748 p += get_opcode(p, &opcode);
33749 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33750diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33751index 7b22ada..b11e66f 100644
33752--- a/arch/x86/mm/pgtable.c
33753+++ b/arch/x86/mm/pgtable.c
33754@@ -97,10 +97,75 @@ static inline void pgd_list_del(pgd_t *pgd)
33755 list_del(&page->lru);
33756 }
33757
33758-#define UNSHARED_PTRS_PER_PGD \
33759- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33760+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33761+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33762
33763+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33764+{
33765+ unsigned int count = USER_PGD_PTRS;
33766
33767+ if (!pax_user_shadow_base)
33768+ return;
33769+
33770+ while (count--)
33771+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33772+}
33773+#endif
33774+
33775+#ifdef CONFIG_PAX_PER_CPU_PGD
33776+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33777+{
33778+ unsigned int count = USER_PGD_PTRS;
33779+
33780+ while (count--) {
33781+ pgd_t pgd;
33782+
33783+#ifdef CONFIG_X86_64
33784+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33785+#else
33786+ pgd = *src++;
33787+#endif
33788+
33789+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33790+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33791+#endif
33792+
33793+ *dst++ = pgd;
33794+ }
33795+
33796+}
33797+#endif
33798+
33799+#ifdef CONFIG_X86_64
33800+#define pxd_t pud_t
33801+#define pyd_t pgd_t
33802+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33803+#define pgtable_pxd_page_ctor(page) true
33804+#define pgtable_pxd_page_dtor(page) do {} while (0)
33805+#define pxd_free(mm, pud) pud_free((mm), (pud))
33806+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33807+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33808+#define PYD_SIZE PGDIR_SIZE
33809+#define mm_inc_nr_pxds(mm) do {} while (0)
33810+#define mm_dec_nr_pxds(mm) do {} while (0)
33811+#else
33812+#define pxd_t pmd_t
33813+#define pyd_t pud_t
33814+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33815+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33816+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33817+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33818+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33819+#define pyd_offset(mm, address) pud_offset((mm), (address))
33820+#define PYD_SIZE PUD_SIZE
33821+#define mm_inc_nr_pxds(mm) mm_inc_nr_pmds(mm)
33822+#define mm_dec_nr_pxds(mm) mm_dec_nr_pmds(mm)
33823+#endif
33824+
33825+#ifdef CONFIG_PAX_PER_CPU_PGD
33826+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33827+static inline void pgd_dtor(pgd_t *pgd) {}
33828+#else
33829 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33830 {
33831 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33832@@ -141,6 +206,7 @@ static void pgd_dtor(pgd_t *pgd)
33833 pgd_list_del(pgd);
33834 spin_unlock(&pgd_lock);
33835 }
33836+#endif
33837
33838 /*
33839 * List of all pgd's needed for non-PAE so it can invalidate entries
33840@@ -153,7 +219,7 @@ static void pgd_dtor(pgd_t *pgd)
33841 * -- nyc
33842 */
33843
33844-#ifdef CONFIG_X86_PAE
33845+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33846 /*
33847 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33848 * updating the top-level pagetable entries to guarantee the
33849@@ -165,7 +231,7 @@ static void pgd_dtor(pgd_t *pgd)
33850 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33851 * and initialize the kernel pmds here.
33852 */
33853-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33854+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33855
33856 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33857 {
33858@@ -183,46 +249,48 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33859 */
33860 flush_tlb_mm(mm);
33861 }
33862+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33863+#define PREALLOCATED_PXDS USER_PGD_PTRS
33864 #else /* !CONFIG_X86_PAE */
33865
33866 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33867-#define PREALLOCATED_PMDS 0
33868+#define PREALLOCATED_PXDS 0
33869
33870 #endif /* CONFIG_X86_PAE */
33871
33872-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
33873+static void free_pxds(struct mm_struct *mm, pxd_t *pxds[])
33874 {
33875 int i;
33876
33877- for(i = 0; i < PREALLOCATED_PMDS; i++)
33878- if (pmds[i]) {
33879- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33880- free_page((unsigned long)pmds[i]);
33881- mm_dec_nr_pmds(mm);
33882+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33883+ if (pxds[i]) {
33884+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33885+ free_page((unsigned long)pxds[i]);
33886+ mm_dec_nr_pxds(mm);
33887 }
33888 }
33889
33890-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33891+static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[])
33892 {
33893 int i;
33894 bool failed = false;
33895
33896- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33897- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33898- if (!pmd)
33899+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33900+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33901+ if (!pxd)
33902 failed = true;
33903- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33904- free_page((unsigned long)pmd);
33905- pmd = NULL;
33906+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33907+ free_page((unsigned long)pxd);
33908+ pxd = NULL;
33909 failed = true;
33910 }
33911- if (pmd)
33912- mm_inc_nr_pmds(mm);
33913- pmds[i] = pmd;
33914+ if (pxd)
33915+ mm_inc_nr_pxds(mm);
33916+ pxds[i] = pxd;
33917 }
33918
33919 if (failed) {
33920- free_pmds(mm, pmds);
33921+ free_pxds(mm, pxds);
33922 return -ENOMEM;
33923 }
33924
33925@@ -235,50 +303,54 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33926 * preallocate which never got a corresponding vma will need to be
33927 * freed manually.
33928 */
33929-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33930+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33931 {
33932 int i;
33933
33934- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33935+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33936 pgd_t pgd = pgdp[i];
33937
33938 if (pgd_val(pgd) != 0) {
33939- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33940+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33941
33942- pgdp[i] = native_make_pgd(0);
33943+ set_pgd(pgdp + i, native_make_pgd(0));
33944
33945- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33946- pmd_free(mm, pmd);
33947- mm_dec_nr_pmds(mm);
33948+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33949+ pxd_free(mm, pxd);
33950+ mm_dec_nr_pxds(mm);
33951 }
33952 }
33953 }
33954
33955-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33956+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33957 {
33958- pud_t *pud;
33959+ pyd_t *pyd;
33960 int i;
33961
33962- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33963+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33964 return;
33965
33966- pud = pud_offset(pgd, 0);
33967+#ifdef CONFIG_X86_64
33968+ pyd = pyd_offset(mm, 0L);
33969+#else
33970+ pyd = pyd_offset(pgd, 0L);
33971+#endif
33972
33973- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33974- pmd_t *pmd = pmds[i];
33975+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33976+ pxd_t *pxd = pxds[i];
33977
33978 if (i >= KERNEL_PGD_BOUNDARY)
33979- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33980- sizeof(pmd_t) * PTRS_PER_PMD);
33981+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33982+ sizeof(pxd_t) * PTRS_PER_PMD);
33983
33984- pud_populate(mm, pud, pmd);
33985+ pyd_populate(mm, pyd, pxd);
33986 }
33987 }
33988
33989 pgd_t *pgd_alloc(struct mm_struct *mm)
33990 {
33991 pgd_t *pgd;
33992- pmd_t *pmds[PREALLOCATED_PMDS];
33993+ pxd_t *pxds[PREALLOCATED_PXDS];
33994
33995 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33996
33997@@ -287,11 +359,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33998
33999 mm->pgd = pgd;
34000
34001- if (preallocate_pmds(mm, pmds) != 0)
34002+ if (preallocate_pxds(mm, pxds) != 0)
34003 goto out_free_pgd;
34004
34005 if (paravirt_pgd_alloc(mm) != 0)
34006- goto out_free_pmds;
34007+ goto out_free_pxds;
34008
34009 /*
34010 * Make sure that pre-populating the pmds is atomic with
34011@@ -301,14 +373,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34012 spin_lock(&pgd_lock);
34013
34014 pgd_ctor(mm, pgd);
34015- pgd_prepopulate_pmd(mm, pgd, pmds);
34016+ pgd_prepopulate_pxd(mm, pgd, pxds);
34017
34018 spin_unlock(&pgd_lock);
34019
34020 return pgd;
34021
34022-out_free_pmds:
34023- free_pmds(mm, pmds);
34024+out_free_pxds:
34025+ free_pxds(mm, pxds);
34026 out_free_pgd:
34027 free_page((unsigned long)pgd);
34028 out:
34029@@ -317,7 +389,7 @@ out:
34030
34031 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34032 {
34033- pgd_mop_up_pmds(mm, pgd);
34034+ pgd_mop_up_pxds(mm, pgd);
34035 pgd_dtor(pgd);
34036 paravirt_pgd_free(mm, pgd);
34037 free_page((unsigned long)pgd);
34038diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34039index 75cc097..79a097f 100644
34040--- a/arch/x86/mm/pgtable_32.c
34041+++ b/arch/x86/mm/pgtable_32.c
34042@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34043 return;
34044 }
34045 pte = pte_offset_kernel(pmd, vaddr);
34046+
34047+ pax_open_kernel();
34048 if (pte_val(pteval))
34049 set_pte_at(&init_mm, vaddr, pte, pteval);
34050 else
34051 pte_clear(&init_mm, vaddr, pte);
34052+ pax_close_kernel();
34053
34054 /*
34055 * It's enough to flush this one mapping.
34056diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34057index e666cbb..61788c45 100644
34058--- a/arch/x86/mm/physaddr.c
34059+++ b/arch/x86/mm/physaddr.c
34060@@ -10,7 +10,7 @@
34061 #ifdef CONFIG_X86_64
34062
34063 #ifdef CONFIG_DEBUG_VIRTUAL
34064-unsigned long __phys_addr(unsigned long x)
34065+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34066 {
34067 unsigned long y = x - __START_KERNEL_map;
34068
34069@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34070 #else
34071
34072 #ifdef CONFIG_DEBUG_VIRTUAL
34073-unsigned long __phys_addr(unsigned long x)
34074+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34075 {
34076 unsigned long phys_addr = x - PAGE_OFFSET;
34077 /* VMALLOC_* aren't constants */
34078diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34079index 90555bf..f5f1828 100644
34080--- a/arch/x86/mm/setup_nx.c
34081+++ b/arch/x86/mm/setup_nx.c
34082@@ -5,8 +5,10 @@
34083 #include <asm/pgtable.h>
34084 #include <asm/proto.h>
34085
34086+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34087 static int disable_nx;
34088
34089+#ifndef CONFIG_PAX_PAGEEXEC
34090 /*
34091 * noexec = on|off
34092 *
34093@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34094 return 0;
34095 }
34096 early_param("noexec", noexec_setup);
34097+#endif
34098+
34099+#endif
34100
34101 void x86_configure_nx(void)
34102 {
34103+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34104 if (cpu_has_nx && !disable_nx)
34105 __supported_pte_mask |= _PAGE_NX;
34106 else
34107+#endif
34108 __supported_pte_mask &= ~_PAGE_NX;
34109 }
34110
34111diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34112index 3250f23..7a97ba2 100644
34113--- a/arch/x86/mm/tlb.c
34114+++ b/arch/x86/mm/tlb.c
34115@@ -45,7 +45,11 @@ void leave_mm(int cpu)
34116 BUG();
34117 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34118 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34119+
34120+#ifndef CONFIG_PAX_PER_CPU_PGD
34121 load_cr3(swapper_pg_dir);
34122+#endif
34123+
34124 /*
34125 * This gets called in the idle path where RCU
34126 * functions differently. Tracing normally
34127diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34128new file mode 100644
34129index 0000000..dace51c
34130--- /dev/null
34131+++ b/arch/x86/mm/uderef_64.c
34132@@ -0,0 +1,37 @@
34133+#include <linux/mm.h>
34134+#include <asm/pgtable.h>
34135+#include <asm/uaccess.h>
34136+
34137+#ifdef CONFIG_PAX_MEMORY_UDEREF
34138+/* PaX: due to the special call convention these functions must
34139+ * - remain leaf functions under all configurations,
34140+ * - never be called directly, only dereferenced from the wrappers.
34141+ */
34142+void __pax_open_userland(void)
34143+{
34144+ unsigned int cpu;
34145+
34146+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34147+ return;
34148+
34149+ cpu = raw_get_cpu();
34150+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34151+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34152+ raw_put_cpu_no_resched();
34153+}
34154+EXPORT_SYMBOL(__pax_open_userland);
34155+
34156+void __pax_close_userland(void)
34157+{
34158+ unsigned int cpu;
34159+
34160+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34161+ return;
34162+
34163+ cpu = raw_get_cpu();
34164+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34165+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34166+ raw_put_cpu_no_resched();
34167+}
34168+EXPORT_SYMBOL(__pax_close_userland);
34169+#endif
34170diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34171index 6440221..f84b5c7 100644
34172--- a/arch/x86/net/bpf_jit.S
34173+++ b/arch/x86/net/bpf_jit.S
34174@@ -9,6 +9,7 @@
34175 */
34176 #include <linux/linkage.h>
34177 #include <asm/dwarf2.h>
34178+#include <asm/alternative-asm.h>
34179
34180 /*
34181 * Calling convention :
34182@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34183 jle bpf_slow_path_word
34184 mov (SKBDATA,%rsi),%eax
34185 bswap %eax /* ntohl() */
34186+ pax_force_retaddr
34187 ret
34188
34189 sk_load_half:
34190@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34191 jle bpf_slow_path_half
34192 movzwl (SKBDATA,%rsi),%eax
34193 rol $8,%ax # ntohs()
34194+ pax_force_retaddr
34195 ret
34196
34197 sk_load_byte:
34198@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34199 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34200 jle bpf_slow_path_byte
34201 movzbl (SKBDATA,%rsi),%eax
34202+ pax_force_retaddr
34203 ret
34204
34205 /* rsi contains offset and can be scratched */
34206@@ -90,6 +94,7 @@ bpf_slow_path_word:
34207 js bpf_error
34208 mov - MAX_BPF_STACK + 32(%rbp),%eax
34209 bswap %eax
34210+ pax_force_retaddr
34211 ret
34212
34213 bpf_slow_path_half:
34214@@ -98,12 +103,14 @@ bpf_slow_path_half:
34215 mov - MAX_BPF_STACK + 32(%rbp),%ax
34216 rol $8,%ax
34217 movzwl %ax,%eax
34218+ pax_force_retaddr
34219 ret
34220
34221 bpf_slow_path_byte:
34222 bpf_slow_path_common(1)
34223 js bpf_error
34224 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34225+ pax_force_retaddr
34226 ret
34227
34228 #define sk_negative_common(SIZE) \
34229@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34230 sk_negative_common(4)
34231 mov (%rax), %eax
34232 bswap %eax
34233+ pax_force_retaddr
34234 ret
34235
34236 bpf_slow_path_half_neg:
34237@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34238 mov (%rax),%ax
34239 rol $8,%ax
34240 movzwl %ax,%eax
34241+ pax_force_retaddr
34242 ret
34243
34244 bpf_slow_path_byte_neg:
34245@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34246 .globl sk_load_byte_negative_offset
34247 sk_negative_common(1)
34248 movzbl (%rax), %eax
34249+ pax_force_retaddr
34250 ret
34251
34252 bpf_error:
34253@@ -156,4 +166,5 @@ bpf_error:
34254 mov - MAX_BPF_STACK + 16(%rbp),%r14
34255 mov - MAX_BPF_STACK + 24(%rbp),%r15
34256 leaveq
34257+ pax_force_retaddr
34258 ret
34259diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34260index 9875143..36776ae 100644
34261--- a/arch/x86/net/bpf_jit_comp.c
34262+++ b/arch/x86/net/bpf_jit_comp.c
34263@@ -13,7 +13,11 @@
34264 #include <linux/if_vlan.h>
34265 #include <asm/cacheflush.h>
34266
34267+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34268+int bpf_jit_enable __read_only;
34269+#else
34270 int bpf_jit_enable __read_mostly;
34271+#endif
34272
34273 /*
34274 * assembly code in arch/x86/net/bpf_jit.S
34275@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34276 static void jit_fill_hole(void *area, unsigned int size)
34277 {
34278 /* fill whole space with int3 instructions */
34279+ pax_open_kernel();
34280 memset(area, 0xcc, size);
34281+ pax_close_kernel();
34282 }
34283
34284 struct jit_context {
34285@@ -559,6 +565,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
34286 if (is_ereg(dst_reg))
34287 EMIT1(0x41);
34288 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
34289+
34290+ /* emit 'movzwl eax, ax' */
34291+ if (is_ereg(dst_reg))
34292+ EMIT3(0x45, 0x0F, 0xB7);
34293+ else
34294+ EMIT2(0x0F, 0xB7);
34295+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
34296 break;
34297 case 32:
34298 /* emit 'bswap eax' to swap lower 4 bytes */
34299@@ -577,6 +590,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
34300 break;
34301
34302 case BPF_ALU | BPF_END | BPF_FROM_LE:
34303+ switch (imm32) {
34304+ case 16:
34305+ /* emit 'movzwl eax, ax' to zero extend 16-bit
34306+ * into 64 bit
34307+ */
34308+ if (is_ereg(dst_reg))
34309+ EMIT3(0x45, 0x0F, 0xB7);
34310+ else
34311+ EMIT2(0x0F, 0xB7);
34312+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
34313+ break;
34314+ case 32:
34315+ /* emit 'mov eax, eax' to clear upper 32-bits */
34316+ if (is_ereg(dst_reg))
34317+ EMIT1(0x45);
34318+ EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
34319+ break;
34320+ case 64:
34321+ /* nop */
34322+ break;
34323+ }
34324 break;
34325
34326 /* ST: *(u8*)(dst_reg + off) = imm */
34327@@ -896,7 +930,9 @@ common_load:
34328 pr_err("bpf_jit_compile fatal error\n");
34329 return -EFAULT;
34330 }
34331+ pax_open_kernel();
34332 memcpy(image + proglen, temp, ilen);
34333+ pax_close_kernel();
34334 }
34335 proglen += ilen;
34336 addrs[i] = proglen;
34337@@ -968,7 +1004,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34338
34339 if (image) {
34340 bpf_flush_icache(header, image + proglen);
34341- set_memory_ro((unsigned long)header, header->pages);
34342 prog->bpf_func = (void *)image;
34343 prog->jited = true;
34344 }
34345@@ -981,12 +1016,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34346 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34347 struct bpf_binary_header *header = (void *)addr;
34348
34349- if (!fp->jited)
34350- goto free_filter;
34351+ if (fp->jited)
34352+ bpf_jit_binary_free(header);
34353
34354- set_memory_rw(addr, header->pages);
34355- bpf_jit_binary_free(header);
34356-
34357-free_filter:
34358 bpf_prog_unlock_free(fp);
34359 }
34360diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34361index 5d04be5..2beeaa2 100644
34362--- a/arch/x86/oprofile/backtrace.c
34363+++ b/arch/x86/oprofile/backtrace.c
34364@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34365 struct stack_frame_ia32 *fp;
34366 unsigned long bytes;
34367
34368- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34369+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34370 if (bytes != 0)
34371 return NULL;
34372
34373- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34374+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34375
34376 oprofile_add_trace(bufhead[0].return_address);
34377
34378@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34379 struct stack_frame bufhead[2];
34380 unsigned long bytes;
34381
34382- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34383+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34384 if (bytes != 0)
34385 return NULL;
34386
34387@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34388 {
34389 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34390
34391- if (!user_mode_vm(regs)) {
34392+ if (!user_mode(regs)) {
34393 unsigned long stack = kernel_stack_pointer(regs);
34394 if (depth)
34395 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34396diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34397index 1d2e639..f6ef82a 100644
34398--- a/arch/x86/oprofile/nmi_int.c
34399+++ b/arch/x86/oprofile/nmi_int.c
34400@@ -23,6 +23,7 @@
34401 #include <asm/nmi.h>
34402 #include <asm/msr.h>
34403 #include <asm/apic.h>
34404+#include <asm/pgtable.h>
34405
34406 #include "op_counter.h"
34407 #include "op_x86_model.h"
34408@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34409 if (ret)
34410 return ret;
34411
34412- if (!model->num_virt_counters)
34413- model->num_virt_counters = model->num_counters;
34414+ if (!model->num_virt_counters) {
34415+ pax_open_kernel();
34416+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34417+ pax_close_kernel();
34418+ }
34419
34420 mux_init(ops);
34421
34422diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34423index 50d86c0..7985318 100644
34424--- a/arch/x86/oprofile/op_model_amd.c
34425+++ b/arch/x86/oprofile/op_model_amd.c
34426@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34427 num_counters = AMD64_NUM_COUNTERS;
34428 }
34429
34430- op_amd_spec.num_counters = num_counters;
34431- op_amd_spec.num_controls = num_counters;
34432- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34433+ pax_open_kernel();
34434+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34435+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34436+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34437+ pax_close_kernel();
34438
34439 return 0;
34440 }
34441diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34442index d90528e..0127e2b 100644
34443--- a/arch/x86/oprofile/op_model_ppro.c
34444+++ b/arch/x86/oprofile/op_model_ppro.c
34445@@ -19,6 +19,7 @@
34446 #include <asm/msr.h>
34447 #include <asm/apic.h>
34448 #include <asm/nmi.h>
34449+#include <asm/pgtable.h>
34450
34451 #include "op_x86_model.h"
34452 #include "op_counter.h"
34453@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34454
34455 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34456
34457- op_arch_perfmon_spec.num_counters = num_counters;
34458- op_arch_perfmon_spec.num_controls = num_counters;
34459+ pax_open_kernel();
34460+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34461+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34462+ pax_close_kernel();
34463 }
34464
34465 static int arch_perfmon_init(struct oprofile_operations *ignore)
34466diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34467index 71e8a67..6a313bb 100644
34468--- a/arch/x86/oprofile/op_x86_model.h
34469+++ b/arch/x86/oprofile/op_x86_model.h
34470@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34471 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34472 struct op_msrs const * const msrs);
34473 #endif
34474-};
34475+} __do_const;
34476
34477 struct op_counter_config;
34478
34479diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34480index 852aa4c..71613f2 100644
34481--- a/arch/x86/pci/intel_mid_pci.c
34482+++ b/arch/x86/pci/intel_mid_pci.c
34483@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34484 pci_mmcfg_late_init();
34485 pcibios_enable_irq = intel_mid_pci_irq_enable;
34486 pcibios_disable_irq = intel_mid_pci_irq_disable;
34487- pci_root_ops = intel_mid_pci_ops;
34488+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34489 pci_soc_mode = 1;
34490 /* Continue with standard init */
34491 return 1;
34492diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34493index 5dc6ca5..25c03f5 100644
34494--- a/arch/x86/pci/irq.c
34495+++ b/arch/x86/pci/irq.c
34496@@ -51,7 +51,7 @@ struct irq_router {
34497 struct irq_router_handler {
34498 u16 vendor;
34499 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34500-};
34501+} __do_const;
34502
34503 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34504 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34505@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34506 return 0;
34507 }
34508
34509-static __initdata struct irq_router_handler pirq_routers[] = {
34510+static __initconst const struct irq_router_handler pirq_routers[] = {
34511 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34512 { PCI_VENDOR_ID_AL, ali_router_probe },
34513 { PCI_VENDOR_ID_ITE, ite_router_probe },
34514@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34515 static void __init pirq_find_router(struct irq_router *r)
34516 {
34517 struct irq_routing_table *rt = pirq_table;
34518- struct irq_router_handler *h;
34519+ const struct irq_router_handler *h;
34520
34521 #ifdef CONFIG_PCI_BIOS
34522 if (!rt->signature) {
34523@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34524 return 0;
34525 }
34526
34527-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34528+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34529 {
34530 .callback = fix_broken_hp_bios_irq9,
34531 .ident = "HP Pavilion N5400 Series Laptop",
34532diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34533index 9b83b90..4112152 100644
34534--- a/arch/x86/pci/pcbios.c
34535+++ b/arch/x86/pci/pcbios.c
34536@@ -79,7 +79,7 @@ union bios32 {
34537 static struct {
34538 unsigned long address;
34539 unsigned short segment;
34540-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34541+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34542
34543 /*
34544 * Returns the entry point for the given service, NULL on error
34545@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34546 unsigned long length; /* %ecx */
34547 unsigned long entry; /* %edx */
34548 unsigned long flags;
34549+ struct desc_struct d, *gdt;
34550
34551 local_irq_save(flags);
34552- __asm__("lcall *(%%edi); cld"
34553+
34554+ gdt = get_cpu_gdt_table(smp_processor_id());
34555+
34556+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34557+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34558+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34559+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34560+
34561+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34562 : "=a" (return_code),
34563 "=b" (address),
34564 "=c" (length),
34565 "=d" (entry)
34566 : "0" (service),
34567 "1" (0),
34568- "D" (&bios32_indirect));
34569+ "D" (&bios32_indirect),
34570+ "r"(__PCIBIOS_DS)
34571+ : "memory");
34572+
34573+ pax_open_kernel();
34574+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34575+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34576+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34577+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34578+ pax_close_kernel();
34579+
34580 local_irq_restore(flags);
34581
34582 switch (return_code) {
34583- case 0:
34584- return address + entry;
34585- case 0x80: /* Not present */
34586- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34587- return 0;
34588- default: /* Shouldn't happen */
34589- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34590- service, return_code);
34591+ case 0: {
34592+ int cpu;
34593+ unsigned char flags;
34594+
34595+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34596+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34597+ printk(KERN_WARNING "bios32_service: not valid\n");
34598 return 0;
34599+ }
34600+ address = address + PAGE_OFFSET;
34601+ length += 16UL; /* some BIOSs underreport this... */
34602+ flags = 4;
34603+ if (length >= 64*1024*1024) {
34604+ length >>= PAGE_SHIFT;
34605+ flags |= 8;
34606+ }
34607+
34608+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34609+ gdt = get_cpu_gdt_table(cpu);
34610+ pack_descriptor(&d, address, length, 0x9b, flags);
34611+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34612+ pack_descriptor(&d, address, length, 0x93, flags);
34613+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34614+ }
34615+ return entry;
34616+ }
34617+ case 0x80: /* Not present */
34618+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34619+ return 0;
34620+ default: /* Shouldn't happen */
34621+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34622+ service, return_code);
34623+ return 0;
34624 }
34625 }
34626
34627 static struct {
34628 unsigned long address;
34629 unsigned short segment;
34630-} pci_indirect = { 0, __KERNEL_CS };
34631+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34632
34633-static int pci_bios_present;
34634+static int pci_bios_present __read_only;
34635
34636 static int __init check_pcibios(void)
34637 {
34638@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34639 unsigned long flags, pcibios_entry;
34640
34641 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34642- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34643+ pci_indirect.address = pcibios_entry;
34644
34645 local_irq_save(flags);
34646- __asm__(
34647- "lcall *(%%edi); cld\n\t"
34648+ __asm__("movw %w6, %%ds\n\t"
34649+ "lcall *%%ss:(%%edi); cld\n\t"
34650+ "push %%ss\n\t"
34651+ "pop %%ds\n\t"
34652 "jc 1f\n\t"
34653 "xor %%ah, %%ah\n"
34654 "1:"
34655@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34656 "=b" (ebx),
34657 "=c" (ecx)
34658 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34659- "D" (&pci_indirect)
34660+ "D" (&pci_indirect),
34661+ "r" (__PCIBIOS_DS)
34662 : "memory");
34663 local_irq_restore(flags);
34664
34665@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34666
34667 switch (len) {
34668 case 1:
34669- __asm__("lcall *(%%esi); cld\n\t"
34670+ __asm__("movw %w6, %%ds\n\t"
34671+ "lcall *%%ss:(%%esi); cld\n\t"
34672+ "push %%ss\n\t"
34673+ "pop %%ds\n\t"
34674 "jc 1f\n\t"
34675 "xor %%ah, %%ah\n"
34676 "1:"
34677@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34678 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34679 "b" (bx),
34680 "D" ((long)reg),
34681- "S" (&pci_indirect));
34682+ "S" (&pci_indirect),
34683+ "r" (__PCIBIOS_DS));
34684 /*
34685 * Zero-extend the result beyond 8 bits, do not trust the
34686 * BIOS having done it:
34687@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34688 *value &= 0xff;
34689 break;
34690 case 2:
34691- __asm__("lcall *(%%esi); cld\n\t"
34692+ __asm__("movw %w6, %%ds\n\t"
34693+ "lcall *%%ss:(%%esi); cld\n\t"
34694+ "push %%ss\n\t"
34695+ "pop %%ds\n\t"
34696 "jc 1f\n\t"
34697 "xor %%ah, %%ah\n"
34698 "1:"
34699@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34700 : "1" (PCIBIOS_READ_CONFIG_WORD),
34701 "b" (bx),
34702 "D" ((long)reg),
34703- "S" (&pci_indirect));
34704+ "S" (&pci_indirect),
34705+ "r" (__PCIBIOS_DS));
34706 /*
34707 * Zero-extend the result beyond 16 bits, do not trust the
34708 * BIOS having done it:
34709@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34710 *value &= 0xffff;
34711 break;
34712 case 4:
34713- __asm__("lcall *(%%esi); cld\n\t"
34714+ __asm__("movw %w6, %%ds\n\t"
34715+ "lcall *%%ss:(%%esi); cld\n\t"
34716+ "push %%ss\n\t"
34717+ "pop %%ds\n\t"
34718 "jc 1f\n\t"
34719 "xor %%ah, %%ah\n"
34720 "1:"
34721@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34722 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34723 "b" (bx),
34724 "D" ((long)reg),
34725- "S" (&pci_indirect));
34726+ "S" (&pci_indirect),
34727+ "r" (__PCIBIOS_DS));
34728 break;
34729 }
34730
34731@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34732
34733 switch (len) {
34734 case 1:
34735- __asm__("lcall *(%%esi); cld\n\t"
34736+ __asm__("movw %w6, %%ds\n\t"
34737+ "lcall *%%ss:(%%esi); cld\n\t"
34738+ "push %%ss\n\t"
34739+ "pop %%ds\n\t"
34740 "jc 1f\n\t"
34741 "xor %%ah, %%ah\n"
34742 "1:"
34743@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34744 "c" (value),
34745 "b" (bx),
34746 "D" ((long)reg),
34747- "S" (&pci_indirect));
34748+ "S" (&pci_indirect),
34749+ "r" (__PCIBIOS_DS));
34750 break;
34751 case 2:
34752- __asm__("lcall *(%%esi); cld\n\t"
34753+ __asm__("movw %w6, %%ds\n\t"
34754+ "lcall *%%ss:(%%esi); cld\n\t"
34755+ "push %%ss\n\t"
34756+ "pop %%ds\n\t"
34757 "jc 1f\n\t"
34758 "xor %%ah, %%ah\n"
34759 "1:"
34760@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34761 "c" (value),
34762 "b" (bx),
34763 "D" ((long)reg),
34764- "S" (&pci_indirect));
34765+ "S" (&pci_indirect),
34766+ "r" (__PCIBIOS_DS));
34767 break;
34768 case 4:
34769- __asm__("lcall *(%%esi); cld\n\t"
34770+ __asm__("movw %w6, %%ds\n\t"
34771+ "lcall *%%ss:(%%esi); cld\n\t"
34772+ "push %%ss\n\t"
34773+ "pop %%ds\n\t"
34774 "jc 1f\n\t"
34775 "xor %%ah, %%ah\n"
34776 "1:"
34777@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34778 "c" (value),
34779 "b" (bx),
34780 "D" ((long)reg),
34781- "S" (&pci_indirect));
34782+ "S" (&pci_indirect),
34783+ "r" (__PCIBIOS_DS));
34784 break;
34785 }
34786
34787@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34788
34789 DBG("PCI: Fetching IRQ routing table... ");
34790 __asm__("push %%es\n\t"
34791+ "movw %w8, %%ds\n\t"
34792 "push %%ds\n\t"
34793 "pop %%es\n\t"
34794- "lcall *(%%esi); cld\n\t"
34795+ "lcall *%%ss:(%%esi); cld\n\t"
34796 "pop %%es\n\t"
34797+ "push %%ss\n\t"
34798+ "pop %%ds\n"
34799 "jc 1f\n\t"
34800 "xor %%ah, %%ah\n"
34801 "1:"
34802@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34803 "1" (0),
34804 "D" ((long) &opt),
34805 "S" (&pci_indirect),
34806- "m" (opt)
34807+ "m" (opt),
34808+ "r" (__PCIBIOS_DS)
34809 : "memory");
34810 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34811 if (ret & 0xff00)
34812@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34813 {
34814 int ret;
34815
34816- __asm__("lcall *(%%esi); cld\n\t"
34817+ __asm__("movw %w5, %%ds\n\t"
34818+ "lcall *%%ss:(%%esi); cld\n\t"
34819+ "push %%ss\n\t"
34820+ "pop %%ds\n"
34821 "jc 1f\n\t"
34822 "xor %%ah, %%ah\n"
34823 "1:"
34824@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34825 : "0" (PCIBIOS_SET_PCI_HW_INT),
34826 "b" ((dev->bus->number << 8) | dev->devfn),
34827 "c" ((irq << 8) | (pin + 10)),
34828- "S" (&pci_indirect));
34829+ "S" (&pci_indirect),
34830+ "r" (__PCIBIOS_DS));
34831 return !(ret & 0xff00);
34832 }
34833 EXPORT_SYMBOL(pcibios_set_irq_routing);
34834diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34835index 40e7cda..c7e6672 100644
34836--- a/arch/x86/platform/efi/efi_32.c
34837+++ b/arch/x86/platform/efi/efi_32.c
34838@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34839 {
34840 struct desc_ptr gdt_descr;
34841
34842+#ifdef CONFIG_PAX_KERNEXEC
34843+ struct desc_struct d;
34844+#endif
34845+
34846 local_irq_save(efi_rt_eflags);
34847
34848 load_cr3(initial_page_table);
34849 __flush_tlb_all();
34850
34851+#ifdef CONFIG_PAX_KERNEXEC
34852+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34853+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34854+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34855+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34856+#endif
34857+
34858 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34859 gdt_descr.size = GDT_SIZE - 1;
34860 load_gdt(&gdt_descr);
34861@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34862 {
34863 struct desc_ptr gdt_descr;
34864
34865+#ifdef CONFIG_PAX_KERNEXEC
34866+ struct desc_struct d;
34867+
34868+ memset(&d, 0, sizeof d);
34869+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34870+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34871+#endif
34872+
34873 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34874 gdt_descr.size = GDT_SIZE - 1;
34875 load_gdt(&gdt_descr);
34876
34877+#ifdef CONFIG_PAX_PER_CPU_PGD
34878+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34879+#else
34880 load_cr3(swapper_pg_dir);
34881+#endif
34882+
34883 __flush_tlb_all();
34884
34885 local_irq_restore(efi_rt_eflags);
34886diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34887index 17e80d8..9fa6e41 100644
34888--- a/arch/x86/platform/efi/efi_64.c
34889+++ b/arch/x86/platform/efi/efi_64.c
34890@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34891 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34892 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34893 }
34894+
34895+#ifdef CONFIG_PAX_PER_CPU_PGD
34896+ load_cr3(swapper_pg_dir);
34897+#endif
34898+
34899 __flush_tlb_all();
34900 }
34901
34902@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34903 for (pgd = 0; pgd < n_pgds; pgd++)
34904 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34905 kfree(save_pgd);
34906+
34907+#ifdef CONFIG_PAX_PER_CPU_PGD
34908+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34909+#endif
34910+
34911 __flush_tlb_all();
34912 local_irq_restore(efi_flags);
34913 early_code_mapping_set_exec(0);
34914@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34915 unsigned npages;
34916 pgd_t *pgd;
34917
34918- if (efi_enabled(EFI_OLD_MEMMAP))
34919+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34920+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34921+ * able to execute the EFI services.
34922+ */
34923+ if (__supported_pte_mask & _PAGE_NX) {
34924+ unsigned long addr = (unsigned long) __va(0);
34925+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34926+
34927+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34928+#ifdef CONFIG_PAX_PER_CPU_PGD
34929+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34930+#endif
34931+ set_pgd(pgd_offset_k(addr), pe);
34932+ }
34933+
34934 return 0;
34935+ }
34936
34937 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34938 pgd = __va(efi_scratch.efi_pgt);
34939diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34940index 040192b..7d3300f 100644
34941--- a/arch/x86/platform/efi/efi_stub_32.S
34942+++ b/arch/x86/platform/efi/efi_stub_32.S
34943@@ -6,7 +6,9 @@
34944 */
34945
34946 #include <linux/linkage.h>
34947+#include <linux/init.h>
34948 #include <asm/page_types.h>
34949+#include <asm/segment.h>
34950
34951 /*
34952 * efi_call_phys(void *, ...) is a function with variable parameters.
34953@@ -20,7 +22,7 @@
34954 * service functions will comply with gcc calling convention, too.
34955 */
34956
34957-.text
34958+__INIT
34959 ENTRY(efi_call_phys)
34960 /*
34961 * 0. The function can only be called in Linux kernel. So CS has been
34962@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34963 * The mapping of lower virtual memory has been created in prolog and
34964 * epilog.
34965 */
34966- movl $1f, %edx
34967- subl $__PAGE_OFFSET, %edx
34968- jmp *%edx
34969+#ifdef CONFIG_PAX_KERNEXEC
34970+ movl $(__KERNEXEC_EFI_DS), %edx
34971+ mov %edx, %ds
34972+ mov %edx, %es
34973+ mov %edx, %ss
34974+ addl $2f,(1f)
34975+ ljmp *(1f)
34976+
34977+__INITDATA
34978+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34979+.previous
34980+
34981+2:
34982+ subl $2b,(1b)
34983+#else
34984+ jmp 1f-__PAGE_OFFSET
34985 1:
34986+#endif
34987
34988 /*
34989 * 2. Now on the top of stack is the return
34990@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34991 * parameter 2, ..., param n. To make things easy, we save the return
34992 * address of efi_call_phys in a global variable.
34993 */
34994- popl %edx
34995- movl %edx, saved_return_addr
34996- /* get the function pointer into ECX*/
34997- popl %ecx
34998- movl %ecx, efi_rt_function_ptr
34999- movl $2f, %edx
35000- subl $__PAGE_OFFSET, %edx
35001- pushl %edx
35002+ popl (saved_return_addr)
35003+ popl (efi_rt_function_ptr)
35004
35005 /*
35006 * 3. Clear PG bit in %CR0.
35007@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
35008 /*
35009 * 5. Call the physical function.
35010 */
35011- jmp *%ecx
35012+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35013
35014-2:
35015 /*
35016 * 6. After EFI runtime service returns, control will return to
35017 * following instruction. We'd better readjust stack pointer first.
35018@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35019 movl %cr0, %edx
35020 orl $0x80000000, %edx
35021 movl %edx, %cr0
35022- jmp 1f
35023-1:
35024+
35025 /*
35026 * 8. Now restore the virtual mode from flat mode by
35027 * adding EIP with PAGE_OFFSET.
35028 */
35029- movl $1f, %edx
35030- jmp *%edx
35031+#ifdef CONFIG_PAX_KERNEXEC
35032+ movl $(__KERNEL_DS), %edx
35033+ mov %edx, %ds
35034+ mov %edx, %es
35035+ mov %edx, %ss
35036+ ljmp $(__KERNEL_CS),$1f
35037+#else
35038+ jmp 1f+__PAGE_OFFSET
35039+#endif
35040 1:
35041
35042 /*
35043 * 9. Balance the stack. And because EAX contain the return value,
35044 * we'd better not clobber it.
35045 */
35046- leal efi_rt_function_ptr, %edx
35047- movl (%edx), %ecx
35048- pushl %ecx
35049+ pushl (efi_rt_function_ptr)
35050
35051 /*
35052- * 10. Push the saved return address onto the stack and return.
35053+ * 10. Return to the saved return address.
35054 */
35055- leal saved_return_addr, %edx
35056- movl (%edx), %ecx
35057- pushl %ecx
35058- ret
35059+ jmpl *(saved_return_addr)
35060 ENDPROC(efi_call_phys)
35061 .previous
35062
35063-.data
35064+__INITDATA
35065 saved_return_addr:
35066 .long 0
35067 efi_rt_function_ptr:
35068diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35069index 86d0f9e..6d499f4 100644
35070--- a/arch/x86/platform/efi/efi_stub_64.S
35071+++ b/arch/x86/platform/efi/efi_stub_64.S
35072@@ -11,6 +11,7 @@
35073 #include <asm/msr.h>
35074 #include <asm/processor-flags.h>
35075 #include <asm/page_types.h>
35076+#include <asm/alternative-asm.h>
35077
35078 #define SAVE_XMM \
35079 mov %rsp, %rax; \
35080@@ -88,6 +89,7 @@ ENTRY(efi_call)
35081 RESTORE_PGT
35082 addq $48, %rsp
35083 RESTORE_XMM
35084+ pax_force_retaddr 0, 1
35085 ret
35086 ENDPROC(efi_call)
35087
35088diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35089index 3005f0c..d06aeb0 100644
35090--- a/arch/x86/platform/intel-mid/intel-mid.c
35091+++ b/arch/x86/platform/intel-mid/intel-mid.c
35092@@ -63,7 +63,7 @@ enum intel_mid_timer_options intel_mid_timer_options;
35093 /* intel_mid_ops to store sub arch ops */
35094 struct intel_mid_ops *intel_mid_ops;
35095 /* getter function for sub arch ops*/
35096-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35097+static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35098 enum intel_mid_cpu_type __intel_mid_cpu_chip;
35099 EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
35100
35101@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35102 {
35103 };
35104
35105-static void intel_mid_reboot(void)
35106+static void __noreturn intel_mid_reboot(void)
35107 {
35108 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35109+ BUG();
35110 }
35111
35112 static unsigned long __init intel_mid_calibrate_tsc(void)
35113diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35114index 3c1c386..59a68ed 100644
35115--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35116+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35117@@ -13,6 +13,6 @@
35118 /* For every CPU addition a new get_<cpuname>_ops interface needs
35119 * to be added.
35120 */
35121-extern void *get_penwell_ops(void);
35122-extern void *get_cloverview_ops(void);
35123-extern void *get_tangier_ops(void);
35124+extern const void *get_penwell_ops(void);
35125+extern const void *get_cloverview_ops(void);
35126+extern const void *get_tangier_ops(void);
35127diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35128index 23381d2..8ddc10e 100644
35129--- a/arch/x86/platform/intel-mid/mfld.c
35130+++ b/arch/x86/platform/intel-mid/mfld.c
35131@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35132 pm_power_off = mfld_power_off;
35133 }
35134
35135-void *get_penwell_ops(void)
35136+const void *get_penwell_ops(void)
35137 {
35138 return &penwell_ops;
35139 }
35140
35141-void *get_cloverview_ops(void)
35142+const void *get_cloverview_ops(void)
35143 {
35144 return &penwell_ops;
35145 }
35146diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35147index aaca917..66eadbc 100644
35148--- a/arch/x86/platform/intel-mid/mrfl.c
35149+++ b/arch/x86/platform/intel-mid/mrfl.c
35150@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35151 .arch_setup = tangier_arch_setup,
35152 };
35153
35154-void *get_tangier_ops(void)
35155+const void *get_tangier_ops(void)
35156 {
35157 return &tangier_ops;
35158 }
35159diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
35160index c9a0838..fae0977 100644
35161--- a/arch/x86/platform/intel-quark/imr_selftest.c
35162+++ b/arch/x86/platform/intel-quark/imr_selftest.c
35163@@ -54,7 +54,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
35164 */
35165 static void __init imr_self_test(void)
35166 {
35167- phys_addr_t base = virt_to_phys(&_text);
35168+ phys_addr_t base = virt_to_phys(ktla_ktva(_text));
35169 size_t size = virt_to_phys(&__end_rodata) - base;
35170 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
35171 int ret;
35172diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35173index d6ee929..3637cb5 100644
35174--- a/arch/x86/platform/olpc/olpc_dt.c
35175+++ b/arch/x86/platform/olpc/olpc_dt.c
35176@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35177 return res;
35178 }
35179
35180-static struct of_pdt_ops prom_olpc_ops __initdata = {
35181+static struct of_pdt_ops prom_olpc_ops __initconst = {
35182 .nextprop = olpc_dt_nextprop,
35183 .getproplen = olpc_dt_getproplen,
35184 .getproperty = olpc_dt_getproperty,
35185diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35186index 3e32ed5..cc0adc5 100644
35187--- a/arch/x86/power/cpu.c
35188+++ b/arch/x86/power/cpu.c
35189@@ -134,11 +134,8 @@ static void do_fpu_end(void)
35190 static void fix_processor_context(void)
35191 {
35192 int cpu = smp_processor_id();
35193- struct tss_struct *t = &per_cpu(init_tss, cpu);
35194-#ifdef CONFIG_X86_64
35195- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35196- tss_desc tss;
35197-#endif
35198+ struct tss_struct *t = init_tss + cpu;
35199+
35200 set_tss_desc(cpu, t); /*
35201 * This just modifies memory; should not be
35202 * necessary. But... This is necessary, because
35203@@ -147,10 +144,6 @@ static void fix_processor_context(void)
35204 */
35205
35206 #ifdef CONFIG_X86_64
35207- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35208- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35209- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35210-
35211 syscall_init(); /* This sets MSR_*STAR and related */
35212 #endif
35213 load_TR_desc(); /* This does ltr */
35214diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35215index 0b7a63d..0d0f2c2 100644
35216--- a/arch/x86/realmode/init.c
35217+++ b/arch/x86/realmode/init.c
35218@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35219 __va(real_mode_header->trampoline_header);
35220
35221 #ifdef CONFIG_X86_32
35222- trampoline_header->start = __pa_symbol(startup_32_smp);
35223+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35224+
35225+#ifdef CONFIG_PAX_KERNEXEC
35226+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35227+#endif
35228+
35229+ trampoline_header->boot_cs = __BOOT_CS;
35230 trampoline_header->gdt_limit = __BOOT_DS + 7;
35231 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35232 #else
35233@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35234 *trampoline_cr4_features = __read_cr4();
35235
35236 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35237- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35238+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35239 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35240 #endif
35241 }
35242diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35243index 2730d77..2e4cd19 100644
35244--- a/arch/x86/realmode/rm/Makefile
35245+++ b/arch/x86/realmode/rm/Makefile
35246@@ -68,5 +68,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35247
35248 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35249 -I$(srctree)/arch/x86/boot
35250+ifdef CONSTIFY_PLUGIN
35251+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35252+endif
35253 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35254 GCOV_PROFILE := n
35255diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35256index a28221d..93c40f1 100644
35257--- a/arch/x86/realmode/rm/header.S
35258+++ b/arch/x86/realmode/rm/header.S
35259@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35260 #endif
35261 /* APM/BIOS reboot */
35262 .long pa_machine_real_restart_asm
35263-#ifdef CONFIG_X86_64
35264+#ifdef CONFIG_X86_32
35265+ .long __KERNEL_CS
35266+#else
35267 .long __KERNEL32_CS
35268 #endif
35269 END(real_mode_header)
35270diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
35271index d66c607..3def845 100644
35272--- a/arch/x86/realmode/rm/reboot.S
35273+++ b/arch/x86/realmode/rm/reboot.S
35274@@ -27,6 +27,10 @@ ENTRY(machine_real_restart_asm)
35275 lgdtl pa_tr_gdt
35276
35277 /* Disable paging to drop us out of long mode */
35278+ movl %cr4, %eax
35279+ andl $~X86_CR4_PCIDE, %eax
35280+ movl %eax, %cr4
35281+
35282 movl %cr0, %eax
35283 andl $~X86_CR0_PG, %eax
35284 movl %eax, %cr0
35285diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35286index 48ddd76..c26749f 100644
35287--- a/arch/x86/realmode/rm/trampoline_32.S
35288+++ b/arch/x86/realmode/rm/trampoline_32.S
35289@@ -24,6 +24,12 @@
35290 #include <asm/page_types.h>
35291 #include "realmode.h"
35292
35293+#ifdef CONFIG_PAX_KERNEXEC
35294+#define ta(X) (X)
35295+#else
35296+#define ta(X) (pa_ ## X)
35297+#endif
35298+
35299 .text
35300 .code16
35301
35302@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35303
35304 cli # We should be safe anyway
35305
35306- movl tr_start, %eax # where we need to go
35307-
35308 movl $0xA5A5A5A5, trampoline_status
35309 # write marker for master knows we're running
35310
35311@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35312 movw $1, %dx # protected mode (PE) bit
35313 lmsw %dx # into protected mode
35314
35315- ljmpl $__BOOT_CS, $pa_startup_32
35316+ ljmpl *(trampoline_header)
35317
35318 .section ".text32","ax"
35319 .code32
35320@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35321 .balign 8
35322 GLOBAL(trampoline_header)
35323 tr_start: .space 4
35324- tr_gdt_pad: .space 2
35325+ tr_boot_cs: .space 2
35326 tr_gdt: .space 6
35327 END(trampoline_header)
35328
35329diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35330index dac7b20..72dbaca 100644
35331--- a/arch/x86/realmode/rm/trampoline_64.S
35332+++ b/arch/x86/realmode/rm/trampoline_64.S
35333@@ -93,6 +93,7 @@ ENTRY(startup_32)
35334 movl %edx, %gs
35335
35336 movl pa_tr_cr4, %eax
35337+ andl $~X86_CR4_PCIDE, %eax
35338 movl %eax, %cr4 # Enable PAE mode
35339
35340 # Setup trampoline 4 level pagetables
35341@@ -106,7 +107,7 @@ ENTRY(startup_32)
35342 wrmsr
35343
35344 # Enable paging and in turn activate Long Mode
35345- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35346+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35347 movl %eax, %cr0
35348
35349 /*
35350diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35351index 9e7e147..25a4158 100644
35352--- a/arch/x86/realmode/rm/wakeup_asm.S
35353+++ b/arch/x86/realmode/rm/wakeup_asm.S
35354@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35355 lgdtl pmode_gdt
35356
35357 /* This really couldn't... */
35358- movl pmode_entry, %eax
35359 movl pmode_cr0, %ecx
35360 movl %ecx, %cr0
35361- ljmpl $__KERNEL_CS, $pa_startup_32
35362- /* -> jmp *%eax in trampoline_32.S */
35363+
35364+ ljmpl *pmode_entry
35365 #else
35366 jmp trampoline_start
35367 #endif
35368diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35369index 604a37e..e49702a 100644
35370--- a/arch/x86/tools/Makefile
35371+++ b/arch/x86/tools/Makefile
35372@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35373
35374 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35375
35376-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35377+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35378 hostprogs-y += relocs
35379 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35380 PHONY += relocs
35381diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35382index 0c2fae8..88036b7 100644
35383--- a/arch/x86/tools/relocs.c
35384+++ b/arch/x86/tools/relocs.c
35385@@ -1,5 +1,7 @@
35386 /* This is included from relocs_32/64.c */
35387
35388+#include "../../../include/generated/autoconf.h"
35389+
35390 #define ElfW(type) _ElfW(ELF_BITS, type)
35391 #define _ElfW(bits, type) __ElfW(bits, type)
35392 #define __ElfW(bits, type) Elf##bits##_##type
35393@@ -11,6 +13,7 @@
35394 #define Elf_Sym ElfW(Sym)
35395
35396 static Elf_Ehdr ehdr;
35397+static Elf_Phdr *phdr;
35398
35399 struct relocs {
35400 uint32_t *offset;
35401@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35402 }
35403 }
35404
35405+static void read_phdrs(FILE *fp)
35406+{
35407+ unsigned int i;
35408+
35409+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35410+ if (!phdr) {
35411+ die("Unable to allocate %d program headers\n",
35412+ ehdr.e_phnum);
35413+ }
35414+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35415+ die("Seek to %d failed: %s\n",
35416+ ehdr.e_phoff, strerror(errno));
35417+ }
35418+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35419+ die("Cannot read ELF program headers: %s\n",
35420+ strerror(errno));
35421+ }
35422+ for(i = 0; i < ehdr.e_phnum; i++) {
35423+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35424+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35425+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35426+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35427+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35428+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35429+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35430+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35431+ }
35432+
35433+}
35434+
35435 static void read_shdrs(FILE *fp)
35436 {
35437- int i;
35438+ unsigned int i;
35439 Elf_Shdr shdr;
35440
35441 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35442@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35443
35444 static void read_strtabs(FILE *fp)
35445 {
35446- int i;
35447+ unsigned int i;
35448 for (i = 0; i < ehdr.e_shnum; i++) {
35449 struct section *sec = &secs[i];
35450 if (sec->shdr.sh_type != SHT_STRTAB) {
35451@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35452
35453 static void read_symtabs(FILE *fp)
35454 {
35455- int i,j;
35456+ unsigned int i,j;
35457 for (i = 0; i < ehdr.e_shnum; i++) {
35458 struct section *sec = &secs[i];
35459 if (sec->shdr.sh_type != SHT_SYMTAB) {
35460@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35461 }
35462
35463
35464-static void read_relocs(FILE *fp)
35465+static void read_relocs(FILE *fp, int use_real_mode)
35466 {
35467- int i,j;
35468+ unsigned int i,j;
35469+ uint32_t base;
35470+
35471 for (i = 0; i < ehdr.e_shnum; i++) {
35472 struct section *sec = &secs[i];
35473 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35474@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35475 die("Cannot read symbol table: %s\n",
35476 strerror(errno));
35477 }
35478+ base = 0;
35479+
35480+#ifdef CONFIG_X86_32
35481+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35482+ if (phdr[j].p_type != PT_LOAD )
35483+ continue;
35484+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35485+ continue;
35486+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35487+ break;
35488+ }
35489+#endif
35490+
35491 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35492 Elf_Rel *rel = &sec->reltab[j];
35493- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35494+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35495 rel->r_info = elf_xword_to_cpu(rel->r_info);
35496 #if (SHT_REL_TYPE == SHT_RELA)
35497 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35498@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35499
35500 static void print_absolute_symbols(void)
35501 {
35502- int i;
35503+ unsigned int i;
35504 const char *format;
35505
35506 if (ELF_BITS == 64)
35507@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35508 for (i = 0; i < ehdr.e_shnum; i++) {
35509 struct section *sec = &secs[i];
35510 char *sym_strtab;
35511- int j;
35512+ unsigned int j;
35513
35514 if (sec->shdr.sh_type != SHT_SYMTAB) {
35515 continue;
35516@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35517
35518 static void print_absolute_relocs(void)
35519 {
35520- int i, printed = 0;
35521+ unsigned int i, printed = 0;
35522 const char *format;
35523
35524 if (ELF_BITS == 64)
35525@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35526 struct section *sec_applies, *sec_symtab;
35527 char *sym_strtab;
35528 Elf_Sym *sh_symtab;
35529- int j;
35530+ unsigned int j;
35531 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35532 continue;
35533 }
35534@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35535 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35536 Elf_Sym *sym, const char *symname))
35537 {
35538- int i;
35539+ unsigned int i;
35540 /* Walk through the relocations */
35541 for (i = 0; i < ehdr.e_shnum; i++) {
35542 char *sym_strtab;
35543 Elf_Sym *sh_symtab;
35544 struct section *sec_applies, *sec_symtab;
35545- int j;
35546+ unsigned int j;
35547 struct section *sec = &secs[i];
35548
35549 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35550@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35551 {
35552 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35553 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35554+ char *sym_strtab = sec->link->link->strtab;
35555+
35556+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35557+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35558+ return 0;
35559+
35560+#ifdef CONFIG_PAX_KERNEXEC
35561+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35562+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35563+ return 0;
35564+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35565+ return 0;
35566+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35567+ return 0;
35568+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35569+ return 0;
35570+#endif
35571
35572 switch (r_type) {
35573 case R_386_NONE:
35574@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35575
35576 static void emit_relocs(int as_text, int use_real_mode)
35577 {
35578- int i;
35579+ unsigned int i;
35580 int (*write_reloc)(uint32_t, FILE *) = write32;
35581 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35582 const char *symname);
35583@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35584 {
35585 regex_init(use_real_mode);
35586 read_ehdr(fp);
35587+ read_phdrs(fp);
35588 read_shdrs(fp);
35589 read_strtabs(fp);
35590 read_symtabs(fp);
35591- read_relocs(fp);
35592+ read_relocs(fp, use_real_mode);
35593 if (ELF_BITS == 64)
35594 percpu_init();
35595 if (show_absolute_syms) {
35596diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35597index f40281e..92728c9 100644
35598--- a/arch/x86/um/mem_32.c
35599+++ b/arch/x86/um/mem_32.c
35600@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35601 gate_vma.vm_start = FIXADDR_USER_START;
35602 gate_vma.vm_end = FIXADDR_USER_END;
35603 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35604- gate_vma.vm_page_prot = __P101;
35605+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35606
35607 return 0;
35608 }
35609diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35610index 80ffa5b..a33bd15 100644
35611--- a/arch/x86/um/tls_32.c
35612+++ b/arch/x86/um/tls_32.c
35613@@ -260,7 +260,7 @@ out:
35614 if (unlikely(task == current &&
35615 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35616 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35617- "without flushed TLS.", current->pid);
35618+ "without flushed TLS.", task_pid_nr(current));
35619 }
35620
35621 return 0;
35622diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35623index 7b9be98..39bb57f 100644
35624--- a/arch/x86/vdso/Makefile
35625+++ b/arch/x86/vdso/Makefile
35626@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
35627 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35628 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35629
35630-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35631+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35632 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35633 GCOV_PROFILE := n
35634
35635diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35636index 0224987..c7d65a5 100644
35637--- a/arch/x86/vdso/vdso2c.h
35638+++ b/arch/x86/vdso/vdso2c.h
35639@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35640 unsigned long load_size = -1; /* Work around bogus warning */
35641 unsigned long mapping_size;
35642 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35643- int i;
35644+ unsigned int i;
35645 unsigned long j;
35646 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35647 *alt_sec = NULL;
35648diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35649index e904c27..b9eaa03 100644
35650--- a/arch/x86/vdso/vdso32-setup.c
35651+++ b/arch/x86/vdso/vdso32-setup.c
35652@@ -14,6 +14,7 @@
35653 #include <asm/cpufeature.h>
35654 #include <asm/processor.h>
35655 #include <asm/vdso.h>
35656+#include <asm/mman.h>
35657
35658 #ifdef CONFIG_COMPAT_VDSO
35659 #define VDSO_DEFAULT 0
35660diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35661index 1c9f750..cfddb1a 100644
35662--- a/arch/x86/vdso/vma.c
35663+++ b/arch/x86/vdso/vma.c
35664@@ -19,10 +19,7 @@
35665 #include <asm/page.h>
35666 #include <asm/hpet.h>
35667 #include <asm/desc.h>
35668-
35669-#if defined(CONFIG_X86_64)
35670-unsigned int __read_mostly vdso64_enabled = 1;
35671-#endif
35672+#include <asm/mman.h>
35673
35674 void __init init_vdso_image(const struct vdso_image *image)
35675 {
35676@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35677 .pages = no_pages,
35678 };
35679
35680+#ifdef CONFIG_PAX_RANDMMAP
35681+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35682+ calculate_addr = false;
35683+#endif
35684+
35685 if (calculate_addr) {
35686 addr = vdso_addr(current->mm->start_stack,
35687 image->size - image->sym_vvar_start);
35688@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35689 down_write(&mm->mmap_sem);
35690
35691 addr = get_unmapped_area(NULL, addr,
35692- image->size - image->sym_vvar_start, 0, 0);
35693+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35694 if (IS_ERR_VALUE(addr)) {
35695 ret = addr;
35696 goto up_fail;
35697 }
35698
35699 text_start = addr - image->sym_vvar_start;
35700- current->mm->context.vdso = (void __user *)text_start;
35701+ mm->context.vdso = text_start;
35702
35703 /*
35704 * MAYWRITE to allow gdb to COW and set breakpoints
35705@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35706 hpet_address >> PAGE_SHIFT,
35707 PAGE_SIZE,
35708 pgprot_noncached(PAGE_READONLY));
35709-
35710- if (ret)
35711- goto up_fail;
35712 }
35713 #endif
35714
35715 up_fail:
35716 if (ret)
35717- current->mm->context.vdso = NULL;
35718+ current->mm->context.vdso = 0;
35719
35720 up_write(&mm->mmap_sem);
35721 return ret;
35722@@ -191,8 +190,8 @@ static int load_vdso32(void)
35723
35724 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35725 current_thread_info()->sysenter_return =
35726- current->mm->context.vdso +
35727- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35728+ (void __force_user *)(current->mm->context.vdso +
35729+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35730
35731 return 0;
35732 }
35733@@ -201,9 +200,6 @@ static int load_vdso32(void)
35734 #ifdef CONFIG_X86_64
35735 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35736 {
35737- if (!vdso64_enabled)
35738- return 0;
35739-
35740 return map_vdso(&vdso_image_64, true);
35741 }
35742
35743@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35744 int uses_interp)
35745 {
35746 #ifdef CONFIG_X86_X32_ABI
35747- if (test_thread_flag(TIF_X32)) {
35748- if (!vdso64_enabled)
35749- return 0;
35750-
35751+ if (test_thread_flag(TIF_X32))
35752 return map_vdso(&vdso_image_x32, true);
35753- }
35754 #endif
35755
35756 return load_vdso32();
35757@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35758 #endif
35759
35760 #ifdef CONFIG_X86_64
35761-static __init int vdso_setup(char *s)
35762-{
35763- vdso64_enabled = simple_strtoul(s, NULL, 0);
35764- return 0;
35765-}
35766-__setup("vdso=", vdso_setup);
35767-#endif
35768-
35769-#ifdef CONFIG_X86_64
35770 static void vgetcpu_cpu_init(void *arg)
35771 {
35772 int cpu = smp_processor_id();
35773diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35774index e88fda8..76ce7ce 100644
35775--- a/arch/x86/xen/Kconfig
35776+++ b/arch/x86/xen/Kconfig
35777@@ -9,6 +9,7 @@ config XEN
35778 select XEN_HAVE_PVMMU
35779 depends on X86_64 || (X86_32 && X86_PAE)
35780 depends on X86_TSC
35781+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35782 help
35783 This is the Linux Xen port. Enabling this will allow the
35784 kernel to boot in a paravirtualized environment under the
35785diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35786index 5240f56..0c12163 100644
35787--- a/arch/x86/xen/enlighten.c
35788+++ b/arch/x86/xen/enlighten.c
35789@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35790
35791 struct shared_info xen_dummy_shared_info;
35792
35793-void *xen_initial_gdt;
35794-
35795 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35796 __read_mostly int xen_have_vector_callback;
35797 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35798@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35799 {
35800 unsigned long va = dtr->address;
35801 unsigned int size = dtr->size + 1;
35802- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35803- unsigned long frames[pages];
35804+ unsigned long frames[65536 / PAGE_SIZE];
35805 int f;
35806
35807 /*
35808@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35809 {
35810 unsigned long va = dtr->address;
35811 unsigned int size = dtr->size + 1;
35812- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35813- unsigned long frames[pages];
35814+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35815 int f;
35816
35817 /*
35818@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35819 * 8-byte entries, or 16 4k pages..
35820 */
35821
35822- BUG_ON(size > 65536);
35823+ BUG_ON(size > GDT_SIZE);
35824 BUG_ON(va & ~PAGE_MASK);
35825
35826 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35827@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35828 return 0;
35829 }
35830
35831-static void set_xen_basic_apic_ops(void)
35832+static void __init set_xen_basic_apic_ops(void)
35833 {
35834 apic->read = xen_apic_read;
35835 apic->write = xen_apic_write;
35836@@ -1308,30 +1304,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35837 #endif
35838 };
35839
35840-static void xen_reboot(int reason)
35841+static __noreturn void xen_reboot(int reason)
35842 {
35843 struct sched_shutdown r = { .reason = reason };
35844
35845- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35846- BUG();
35847+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35848+ BUG();
35849 }
35850
35851-static void xen_restart(char *msg)
35852+static __noreturn void xen_restart(char *msg)
35853 {
35854 xen_reboot(SHUTDOWN_reboot);
35855 }
35856
35857-static void xen_emergency_restart(void)
35858+static __noreturn void xen_emergency_restart(void)
35859 {
35860 xen_reboot(SHUTDOWN_reboot);
35861 }
35862
35863-static void xen_machine_halt(void)
35864+static __noreturn void xen_machine_halt(void)
35865 {
35866 xen_reboot(SHUTDOWN_poweroff);
35867 }
35868
35869-static void xen_machine_power_off(void)
35870+static __noreturn void xen_machine_power_off(void)
35871 {
35872 if (pm_power_off)
35873 pm_power_off();
35874@@ -1484,8 +1480,11 @@ static void __ref xen_setup_gdt(int cpu)
35875 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35876 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35877
35878- setup_stack_canary_segment(0);
35879- switch_to_new_gdt(0);
35880+ setup_stack_canary_segment(cpu);
35881+#ifdef CONFIG_X86_64
35882+ load_percpu_segment(cpu);
35883+#endif
35884+ switch_to_new_gdt(cpu);
35885
35886 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35887 pv_cpu_ops.load_gdt = xen_load_gdt;
35888@@ -1600,7 +1599,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35889 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35890
35891 /* Work out if we support NX */
35892- x86_configure_nx();
35893+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35894+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35895+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35896+ unsigned l, h;
35897+
35898+ __supported_pte_mask |= _PAGE_NX;
35899+ rdmsr(MSR_EFER, l, h);
35900+ l |= EFER_NX;
35901+ wrmsr(MSR_EFER, l, h);
35902+ }
35903+#endif
35904
35905 /* Get mfn list */
35906 xen_build_dynamic_phys_to_machine();
35907@@ -1628,13 +1637,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35908
35909 machine_ops = xen_machine_ops;
35910
35911- /*
35912- * The only reliable way to retain the initial address of the
35913- * percpu gdt_page is to remember it here, so we can go and
35914- * mark it RW later, when the initial percpu area is freed.
35915- */
35916- xen_initial_gdt = &per_cpu(gdt_page, 0);
35917-
35918 xen_smp_init();
35919
35920 #ifdef CONFIG_ACPI_NUMA
35921diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35922index adca9e2..cdba9d1 100644
35923--- a/arch/x86/xen/mmu.c
35924+++ b/arch/x86/xen/mmu.c
35925@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35926 return val;
35927 }
35928
35929-static pteval_t pte_pfn_to_mfn(pteval_t val)
35930+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35931 {
35932 if (val & _PAGE_PRESENT) {
35933 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35934@@ -1835,7 +1835,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35935 * L3_k[511] -> level2_fixmap_pgt */
35936 convert_pfn_mfn(level3_kernel_pgt);
35937
35938+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35939+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35940+ convert_pfn_mfn(level3_vmemmap_pgt);
35941 /* L3_k[511][506] -> level1_fixmap_pgt */
35942+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35943 convert_pfn_mfn(level2_fixmap_pgt);
35944 }
35945 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35946@@ -1860,11 +1864,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35947 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35948 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35949 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35950+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35951+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35952+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35953 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35954 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35955+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35956 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35957 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35958- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35959+ set_page_prot(level1_fixmap_pgt[0], PAGE_KERNEL_RO);
35960+ set_page_prot(level1_fixmap_pgt[1], PAGE_KERNEL_RO);
35961+ set_page_prot(level1_fixmap_pgt[2], PAGE_KERNEL_RO);
35962+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35963
35964 /* Pin down new L4 */
35965 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35966@@ -2048,6 +2059,7 @@ static void __init xen_post_allocator_init(void)
35967 pv_mmu_ops.set_pud = xen_set_pud;
35968 #if PAGETABLE_LEVELS == 4
35969 pv_mmu_ops.set_pgd = xen_set_pgd;
35970+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35971 #endif
35972
35973 /* This will work as long as patching hasn't happened yet
35974@@ -2126,6 +2138,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35975 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35976 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35977 .set_pgd = xen_set_pgd_hyper,
35978+ .set_pgd_batched = xen_set_pgd_hyper,
35979
35980 .alloc_pud = xen_alloc_pmd_init,
35981 .release_pud = xen_release_pmd_init,
35982diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35983index 08e8489..b1e182f 100644
35984--- a/arch/x86/xen/smp.c
35985+++ b/arch/x86/xen/smp.c
35986@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35987
35988 if (xen_pv_domain()) {
35989 if (!xen_feature(XENFEAT_writable_page_tables))
35990- /* We've switched to the "real" per-cpu gdt, so make
35991- * sure the old memory can be recycled. */
35992- make_lowmem_page_readwrite(xen_initial_gdt);
35993-
35994 #ifdef CONFIG_X86_32
35995 /*
35996 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35997 * expects __USER_DS
35998 */
35999- loadsegment(ds, __USER_DS);
36000- loadsegment(es, __USER_DS);
36001+ loadsegment(ds, __KERNEL_DS);
36002+ loadsegment(es, __KERNEL_DS);
36003 #endif
36004
36005 xen_filter_cpu_maps();
36006@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36007 #ifdef CONFIG_X86_32
36008 /* Note: PVH is not yet supported on x86_32. */
36009 ctxt->user_regs.fs = __KERNEL_PERCPU;
36010- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
36011+ savesegment(gs, ctxt->user_regs.gs);
36012 #endif
36013 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
36014
36015@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36016 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
36017 ctxt->flags = VGCF_IN_KERNEL;
36018 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
36019- ctxt->user_regs.ds = __USER_DS;
36020- ctxt->user_regs.es = __USER_DS;
36021+ ctxt->user_regs.ds = __KERNEL_DS;
36022+ ctxt->user_regs.es = __KERNEL_DS;
36023 ctxt->user_regs.ss = __KERNEL_DS;
36024
36025 xen_copy_trap_info(ctxt->trap_ctxt);
36026@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
36027 int rc;
36028
36029 per_cpu(current_task, cpu) = idle;
36030+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
36031 #ifdef CONFIG_X86_32
36032 irq_ctx_init(cpu);
36033 #else
36034 clear_tsk_thread_flag(idle, TIF_FORK);
36035 #endif
36036- per_cpu(kernel_stack, cpu) =
36037- (unsigned long)task_stack_page(idle) -
36038- KERNEL_STACK_OFFSET + THREAD_SIZE;
36039+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36040
36041 xen_setup_runstate_info(cpu);
36042 xen_setup_timer(cpu);
36043@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36044
36045 void __init xen_smp_init(void)
36046 {
36047- smp_ops = xen_smp_ops;
36048+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36049 xen_fill_possible_map();
36050 }
36051
36052diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36053index fd92a64..1f72641 100644
36054--- a/arch/x86/xen/xen-asm_32.S
36055+++ b/arch/x86/xen/xen-asm_32.S
36056@@ -99,7 +99,7 @@ ENTRY(xen_iret)
36057 pushw %fs
36058 movl $(__KERNEL_PERCPU), %eax
36059 movl %eax, %fs
36060- movl %fs:xen_vcpu, %eax
36061+ mov PER_CPU_VAR(xen_vcpu), %eax
36062 POP_FS
36063 #else
36064 movl %ss:xen_vcpu, %eax
36065diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36066index 674b2225..f1f5dc1 100644
36067--- a/arch/x86/xen/xen-head.S
36068+++ b/arch/x86/xen/xen-head.S
36069@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36070 #ifdef CONFIG_X86_32
36071 mov %esi,xen_start_info
36072 mov $init_thread_union+THREAD_SIZE,%esp
36073+#ifdef CONFIG_SMP
36074+ movl $cpu_gdt_table,%edi
36075+ movl $__per_cpu_load,%eax
36076+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36077+ rorl $16,%eax
36078+ movb %al,__KERNEL_PERCPU + 4(%edi)
36079+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36080+ movl $__per_cpu_end - 1,%eax
36081+ subl $__per_cpu_start,%eax
36082+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36083+#endif
36084 #else
36085 mov %rsi,xen_start_info
36086 mov $init_thread_union+THREAD_SIZE,%rsp
36087diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36088index 9e195c6..523ed36 100644
36089--- a/arch/x86/xen/xen-ops.h
36090+++ b/arch/x86/xen/xen-ops.h
36091@@ -16,8 +16,6 @@ void xen_syscall_target(void);
36092 void xen_syscall32_target(void);
36093 #endif
36094
36095-extern void *xen_initial_gdt;
36096-
36097 struct trap_info;
36098 void xen_copy_trap_info(struct trap_info *traps);
36099
36100diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36101index 525bd3d..ef888b1 100644
36102--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36103+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36104@@ -119,9 +119,9 @@
36105 ----------------------------------------------------------------------*/
36106
36107 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36108-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36109 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36110 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36111+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36112
36113 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36114 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36115diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36116index 2f33760..835e50a 100644
36117--- a/arch/xtensa/variants/fsf/include/variant/core.h
36118+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36119@@ -11,6 +11,7 @@
36120 #ifndef _XTENSA_CORE_H
36121 #define _XTENSA_CORE_H
36122
36123+#include <linux/const.h>
36124
36125 /****************************************************************************
36126 Parameters Useful for Any Code, USER or PRIVILEGED
36127@@ -112,9 +113,9 @@
36128 ----------------------------------------------------------------------*/
36129
36130 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36131-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36132 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36133 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36134+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36135
36136 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36137 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
36138diff --git a/block/bio.c b/block/bio.c
36139index f66a4ea..73ddf55 100644
36140--- a/block/bio.c
36141+++ b/block/bio.c
36142@@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36143 /*
36144 * Overflow, abort
36145 */
36146- if (end < start)
36147+ if (end < start || end - start > INT_MAX - nr_pages)
36148 return ERR_PTR(-EINVAL);
36149
36150 nr_pages += end - start;
36151@@ -1297,7 +1297,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
36152 /*
36153 * Overflow, abort
36154 */
36155- if (end < start)
36156+ if (end < start || end - start > INT_MAX - nr_pages)
36157 return ERR_PTR(-EINVAL);
36158
36159 nr_pages += end - start;
36160diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36161index 0736729..2ec3b48 100644
36162--- a/block/blk-iopoll.c
36163+++ b/block/blk-iopoll.c
36164@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36165 }
36166 EXPORT_SYMBOL(blk_iopoll_complete);
36167
36168-static void blk_iopoll_softirq(struct softirq_action *h)
36169+static __latent_entropy void blk_iopoll_softirq(void)
36170 {
36171 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36172 int rearm = 0, budget = blk_iopoll_budget;
36173diff --git a/block/blk-map.c b/block/blk-map.c
36174index b8d2725..08c52b0 100644
36175--- a/block/blk-map.c
36176+++ b/block/blk-map.c
36177@@ -192,7 +192,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36178 if (!len || !kbuf)
36179 return -EINVAL;
36180
36181- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36182+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36183 if (do_copy)
36184 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36185 else
36186diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36187index 53b1737..08177d2e 100644
36188--- a/block/blk-softirq.c
36189+++ b/block/blk-softirq.c
36190@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36191 * Softirq action handler - move entries to local list and loop over them
36192 * while passing them to the queue registered handler.
36193 */
36194-static void blk_done_softirq(struct softirq_action *h)
36195+static __latent_entropy void blk_done_softirq(void)
36196 {
36197 struct list_head *cpu_list, local_list;
36198
36199diff --git a/block/bsg.c b/block/bsg.c
36200index d214e92..9649863 100644
36201--- a/block/bsg.c
36202+++ b/block/bsg.c
36203@@ -140,16 +140,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36204 struct sg_io_v4 *hdr, struct bsg_device *bd,
36205 fmode_t has_write_perm)
36206 {
36207+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36208+ unsigned char *cmdptr;
36209+
36210 if (hdr->request_len > BLK_MAX_CDB) {
36211 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36212 if (!rq->cmd)
36213 return -ENOMEM;
36214- }
36215+ cmdptr = rq->cmd;
36216+ } else
36217+ cmdptr = tmpcmd;
36218
36219- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36220+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36221 hdr->request_len))
36222 return -EFAULT;
36223
36224+ if (cmdptr != rq->cmd)
36225+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36226+
36227 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36228 if (blk_verify_command(rq->cmd, has_write_perm))
36229 return -EPERM;
36230diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36231index f678c73..f35aa18 100644
36232--- a/block/compat_ioctl.c
36233+++ b/block/compat_ioctl.c
36234@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36235 cgc = compat_alloc_user_space(sizeof(*cgc));
36236 cgc32 = compat_ptr(arg);
36237
36238- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36239+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36240 get_user(data, &cgc32->buffer) ||
36241 put_user(compat_ptr(data), &cgc->buffer) ||
36242 copy_in_user(&cgc->buflen, &cgc32->buflen,
36243@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36244 err |= __get_user(f->spec1, &uf->spec1);
36245 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36246 err |= __get_user(name, &uf->name);
36247- f->name = compat_ptr(name);
36248+ f->name = (void __force_kernel *)compat_ptr(name);
36249 if (err) {
36250 err = -EFAULT;
36251 goto out;
36252diff --git a/block/genhd.c b/block/genhd.c
36253index 0a536dc..b8f7aca 100644
36254--- a/block/genhd.c
36255+++ b/block/genhd.c
36256@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36257
36258 /*
36259 * Register device numbers dev..(dev+range-1)
36260- * range must be nonzero
36261+ * Noop if @range is zero.
36262 * The hash chain is sorted on range, so that subranges can override.
36263 */
36264 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36265 struct kobject *(*probe)(dev_t, int *, void *),
36266 int (*lock)(dev_t, void *), void *data)
36267 {
36268- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36269+ if (range)
36270+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36271 }
36272
36273 EXPORT_SYMBOL(blk_register_region);
36274
36275+/* undo blk_register_region(), noop if @range is zero */
36276 void blk_unregister_region(dev_t devt, unsigned long range)
36277 {
36278- kobj_unmap(bdev_map, devt, range);
36279+ if (range)
36280+ kobj_unmap(bdev_map, devt, range);
36281 }
36282
36283 EXPORT_SYMBOL(blk_unregister_region);
36284diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36285index 26cb624..a49c3a5 100644
36286--- a/block/partitions/efi.c
36287+++ b/block/partitions/efi.c
36288@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36289 if (!gpt)
36290 return NULL;
36291
36292+ if (!le32_to_cpu(gpt->num_partition_entries))
36293+ return NULL;
36294+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36295+ if (!pte)
36296+ return NULL;
36297+
36298 count = le32_to_cpu(gpt->num_partition_entries) *
36299 le32_to_cpu(gpt->sizeof_partition_entry);
36300- if (!count)
36301- return NULL;
36302- pte = kmalloc(count, GFP_KERNEL);
36303- if (!pte)
36304- return NULL;
36305-
36306 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36307 (u8 *) pte, count) < count) {
36308 kfree(pte);
36309diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36310index e1f71c3..02d295a 100644
36311--- a/block/scsi_ioctl.c
36312+++ b/block/scsi_ioctl.c
36313@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36314 return put_user(0, p);
36315 }
36316
36317-static int sg_get_timeout(struct request_queue *q)
36318+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36319 {
36320 return jiffies_to_clock_t(q->sg_timeout);
36321 }
36322@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36323 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36324 struct sg_io_hdr *hdr, fmode_t mode)
36325 {
36326- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36327+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36328+ unsigned char *cmdptr;
36329+
36330+ if (rq->cmd != rq->__cmd)
36331+ cmdptr = rq->cmd;
36332+ else
36333+ cmdptr = tmpcmd;
36334+
36335+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36336 return -EFAULT;
36337+
36338+ if (cmdptr != rq->cmd)
36339+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36340+
36341 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36342 return -EPERM;
36343
36344@@ -422,6 +434,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36345 int err;
36346 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36347 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36348+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36349+ unsigned char *cmdptr;
36350
36351 if (!sic)
36352 return -EINVAL;
36353@@ -460,9 +474,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36354 */
36355 err = -EFAULT;
36356 rq->cmd_len = cmdlen;
36357- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36358+
36359+ if (rq->cmd != rq->__cmd)
36360+ cmdptr = rq->cmd;
36361+ else
36362+ cmdptr = tmpcmd;
36363+
36364+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36365 goto error;
36366
36367+ if (rq->cmd != cmdptr)
36368+ memcpy(rq->cmd, cmdptr, cmdlen);
36369+
36370 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36371 goto error;
36372
36373diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36374index 650afac1..f3307de 100644
36375--- a/crypto/cryptd.c
36376+++ b/crypto/cryptd.c
36377@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36378
36379 struct cryptd_blkcipher_request_ctx {
36380 crypto_completion_t complete;
36381-};
36382+} __no_const;
36383
36384 struct cryptd_hash_ctx {
36385 struct crypto_shash *child;
36386@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36387
36388 struct cryptd_aead_request_ctx {
36389 crypto_completion_t complete;
36390-};
36391+} __no_const;
36392
36393 static void cryptd_queue_worker(struct work_struct *work);
36394
36395diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36396index c305d41..a96de79 100644
36397--- a/crypto/pcrypt.c
36398+++ b/crypto/pcrypt.c
36399@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36400 int ret;
36401
36402 pinst->kobj.kset = pcrypt_kset;
36403- ret = kobject_add(&pinst->kobj, NULL, name);
36404+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36405 if (!ret)
36406 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36407
36408diff --git a/crypto/zlib.c b/crypto/zlib.c
36409index 0eefa9d..0fa3d29 100644
36410--- a/crypto/zlib.c
36411+++ b/crypto/zlib.c
36412@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36413 zlib_comp_exit(ctx);
36414
36415 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36416- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36417+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36418 : MAX_WBITS;
36419 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36420- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36421+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36422 : DEF_MEM_LEVEL;
36423
36424 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
36425diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36426index 3b37676..898edfa 100644
36427--- a/drivers/acpi/acpica/hwxfsleep.c
36428+++ b/drivers/acpi/acpica/hwxfsleep.c
36429@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36430 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36431
36432 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36433- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36434- acpi_hw_extended_sleep},
36435- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36436- acpi_hw_extended_wake_prep},
36437- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36438+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36439+ .extended_function = acpi_hw_extended_sleep},
36440+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36441+ .extended_function = acpi_hw_extended_wake_prep},
36442+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36443+ .extended_function = acpi_hw_extended_wake}
36444 };
36445
36446 /*
36447diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36448index 16129c7..8b675cd 100644
36449--- a/drivers/acpi/apei/apei-internal.h
36450+++ b/drivers/acpi/apei/apei-internal.h
36451@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36452 struct apei_exec_ins_type {
36453 u32 flags;
36454 apei_exec_ins_func_t run;
36455-};
36456+} __do_const;
36457
36458 struct apei_exec_context {
36459 u32 ip;
36460diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36461index e82d097..0c855c1 100644
36462--- a/drivers/acpi/apei/ghes.c
36463+++ b/drivers/acpi/apei/ghes.c
36464@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36465 const struct acpi_hest_generic *generic,
36466 const struct acpi_hest_generic_status *estatus)
36467 {
36468- static atomic_t seqno;
36469+ static atomic_unchecked_t seqno;
36470 unsigned int curr_seqno;
36471 char pfx_seq[64];
36472
36473@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36474 else
36475 pfx = KERN_ERR;
36476 }
36477- curr_seqno = atomic_inc_return(&seqno);
36478+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36479 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36480 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36481 pfx_seq, generic->header.source_id);
36482diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36483index a83e3c6..c3d617f 100644
36484--- a/drivers/acpi/bgrt.c
36485+++ b/drivers/acpi/bgrt.c
36486@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36487 if (!bgrt_image)
36488 return -ENODEV;
36489
36490- bin_attr_image.private = bgrt_image;
36491- bin_attr_image.size = bgrt_image_size;
36492+ pax_open_kernel();
36493+ *(void **)&bin_attr_image.private = bgrt_image;
36494+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36495+ pax_close_kernel();
36496
36497 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36498 if (!bgrt_kobj)
36499diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36500index 9b693d5..8953d54 100644
36501--- a/drivers/acpi/blacklist.c
36502+++ b/drivers/acpi/blacklist.c
36503@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36504 u32 is_critical_error;
36505 };
36506
36507-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36508+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36509
36510 /*
36511 * POLICY: If *anything* doesn't work, put it on the blacklist.
36512@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36513 return 0;
36514 }
36515
36516-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36517+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36518 {
36519 .callback = dmi_disable_osi_vista,
36520 .ident = "Fujitsu Siemens",
36521diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
36522index 8b67bd0..b59593e 100644
36523--- a/drivers/acpi/bus.c
36524+++ b/drivers/acpi/bus.c
36525@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
36526 }
36527 #endif
36528
36529-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36530+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36531 /*
36532 * Invoke DSDT corruption work-around on all Toshiba Satellite.
36533 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
36534@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36535 {}
36536 };
36537 #else
36538-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36539+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36540 {}
36541 };
36542 #endif
36543diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36544index c68e724..e863008 100644
36545--- a/drivers/acpi/custom_method.c
36546+++ b/drivers/acpi/custom_method.c
36547@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36548 struct acpi_table_header table;
36549 acpi_status status;
36550
36551+#ifdef CONFIG_GRKERNSEC_KMEM
36552+ return -EPERM;
36553+#endif
36554+
36555 if (!(*ppos)) {
36556 /* parse the table header to get the table length */
36557 if (count <= sizeof(struct acpi_table_header))
36558diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36559index 735db11..91e07ff 100644
36560--- a/drivers/acpi/device_pm.c
36561+++ b/drivers/acpi/device_pm.c
36562@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36563
36564 #endif /* CONFIG_PM_SLEEP */
36565
36566+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36567+
36568 static struct dev_pm_domain acpi_general_pm_domain = {
36569 .ops = {
36570 .runtime_suspend = acpi_subsys_runtime_suspend,
36571@@ -1041,6 +1043,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36572 .restore_early = acpi_subsys_resume_early,
36573 #endif
36574 },
36575+ .detach = acpi_dev_pm_detach
36576 };
36577
36578 /**
36579@@ -1110,7 +1113,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36580 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36581 }
36582
36583- dev->pm_domain->detach = acpi_dev_pm_detach;
36584 return 0;
36585 }
36586 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36587diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
36588index a8dd2f7..e15950e 100644
36589--- a/drivers/acpi/ec.c
36590+++ b/drivers/acpi/ec.c
36591@@ -1242,7 +1242,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
36592 return 0;
36593 }
36594
36595-static struct dmi_system_id ec_dmi_table[] __initdata = {
36596+static const struct dmi_system_id ec_dmi_table[] __initconst = {
36597 {
36598 ec_skip_dsdt_scan, "Compal JFL92", {
36599 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
36600diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
36601index 139d9e4..9a9d799 100644
36602--- a/drivers/acpi/pci_slot.c
36603+++ b/drivers/acpi/pci_slot.c
36604@@ -195,7 +195,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
36605 return 0;
36606 }
36607
36608-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
36609+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
36610 /*
36611 * Fujitsu Primequest machines will return 1023 to indicate an
36612 * error if the _SUN method is evaluated on SxFy objects that
36613diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
36614index d9f7158..168e742 100644
36615--- a/drivers/acpi/processor_driver.c
36616+++ b/drivers/acpi/processor_driver.c
36617@@ -159,7 +159,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
36618 return NOTIFY_OK;
36619 }
36620
36621-static struct notifier_block __refdata acpi_cpu_notifier = {
36622+static struct notifier_block __refconst acpi_cpu_notifier = {
36623 .notifier_call = acpi_cpu_soft_notify,
36624 };
36625
36626diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36627index f98db0b..8309c83 100644
36628--- a/drivers/acpi/processor_idle.c
36629+++ b/drivers/acpi/processor_idle.c
36630@@ -912,7 +912,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36631 {
36632 int i, count = CPUIDLE_DRIVER_STATE_START;
36633 struct acpi_processor_cx *cx;
36634- struct cpuidle_state *state;
36635+ cpuidle_state_no_const *state;
36636 struct cpuidle_driver *drv = &acpi_idle_driver;
36637
36638 if (!pr->flags.power_setup_done)
36639diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
36640index e5dd808..1eceed1 100644
36641--- a/drivers/acpi/processor_pdc.c
36642+++ b/drivers/acpi/processor_pdc.c
36643@@ -176,7 +176,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
36644 return 0;
36645 }
36646
36647-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
36648+static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
36649 {
36650 set_no_mwait, "Extensa 5220", {
36651 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
36652diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
36653index 7f251dd..47b262c 100644
36654--- a/drivers/acpi/sleep.c
36655+++ b/drivers/acpi/sleep.c
36656@@ -148,7 +148,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
36657 return 0;
36658 }
36659
36660-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
36661+static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
36662 {
36663 .callback = init_old_suspend_ordering,
36664 .ident = "Abit KN9 (nForce4 variant)",
36665diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36666index 13e577c..cef11ee 100644
36667--- a/drivers/acpi/sysfs.c
36668+++ b/drivers/acpi/sysfs.c
36669@@ -423,11 +423,11 @@ static u32 num_counters;
36670 static struct attribute **all_attrs;
36671 static u32 acpi_gpe_count;
36672
36673-static struct attribute_group interrupt_stats_attr_group = {
36674+static attribute_group_no_const interrupt_stats_attr_group = {
36675 .name = "interrupts",
36676 };
36677
36678-static struct kobj_attribute *counter_attrs;
36679+static kobj_attribute_no_const *counter_attrs;
36680
36681 static void delete_gpe_attr_array(void)
36682 {
36683diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
36684index d24fa19..782f1e6 100644
36685--- a/drivers/acpi/thermal.c
36686+++ b/drivers/acpi/thermal.c
36687@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
36688 return 0;
36689 }
36690
36691-static struct dmi_system_id thermal_dmi_table[] __initdata = {
36692+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
36693 /*
36694 * Award BIOS on this AOpen makes thermal control almost worthless.
36695 * http://bugzilla.kernel.org/show_bug.cgi?id=8842
36696diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
36697index 26eb70c..4d66ddf 100644
36698--- a/drivers/acpi/video.c
36699+++ b/drivers/acpi/video.c
36700@@ -418,7 +418,7 @@ static int __init video_disable_native_backlight(const struct dmi_system_id *d)
36701 return 0;
36702 }
36703
36704-static struct dmi_system_id video_dmi_table[] __initdata = {
36705+static const struct dmi_system_id video_dmi_table[] __initconst = {
36706 /*
36707 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
36708 */
36709diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36710index 61a9c07..ea98fa1 100644
36711--- a/drivers/ata/libahci.c
36712+++ b/drivers/ata/libahci.c
36713@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36714 }
36715 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36716
36717-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36718+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36719 struct ata_taskfile *tf, int is_cmd, u16 flags,
36720 unsigned long timeout_msec)
36721 {
36722diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36723index 23dac3b..89ada44 100644
36724--- a/drivers/ata/libata-core.c
36725+++ b/drivers/ata/libata-core.c
36726@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36727 static void ata_dev_xfermask(struct ata_device *dev);
36728 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36729
36730-atomic_t ata_print_id = ATOMIC_INIT(0);
36731+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36732
36733 struct ata_force_param {
36734 const char *name;
36735@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36736 struct ata_port *ap;
36737 unsigned int tag;
36738
36739- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36740+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36741 ap = qc->ap;
36742
36743 qc->flags = 0;
36744@@ -4797,7 +4797,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36745 struct ata_port *ap;
36746 struct ata_link *link;
36747
36748- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36749+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36750 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36751 ap = qc->ap;
36752 link = qc->dev->link;
36753@@ -5901,6 +5901,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36754 return;
36755
36756 spin_lock(&lock);
36757+ pax_open_kernel();
36758
36759 for (cur = ops->inherits; cur; cur = cur->inherits) {
36760 void **inherit = (void **)cur;
36761@@ -5914,8 +5915,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36762 if (IS_ERR(*pp))
36763 *pp = NULL;
36764
36765- ops->inherits = NULL;
36766+ *(struct ata_port_operations **)&ops->inherits = NULL;
36767
36768+ pax_close_kernel();
36769 spin_unlock(&lock);
36770 }
36771
36772@@ -6111,7 +6113,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36773
36774 /* give ports names and add SCSI hosts */
36775 for (i = 0; i < host->n_ports; i++) {
36776- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36777+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36778 host->ports[i]->local_port_no = i + 1;
36779 }
36780
36781diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36782index b061ba2..fdcd85f 100644
36783--- a/drivers/ata/libata-scsi.c
36784+++ b/drivers/ata/libata-scsi.c
36785@@ -4172,7 +4172,7 @@ int ata_sas_port_init(struct ata_port *ap)
36786
36787 if (rc)
36788 return rc;
36789- ap->print_id = atomic_inc_return(&ata_print_id);
36790+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36791 return 0;
36792 }
36793 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36794diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36795index f840ca1..edd6ef3 100644
36796--- a/drivers/ata/libata.h
36797+++ b/drivers/ata/libata.h
36798@@ -53,7 +53,7 @@ enum {
36799 ATA_DNXFER_QUIET = (1 << 31),
36800 };
36801
36802-extern atomic_t ata_print_id;
36803+extern atomic_unchecked_t ata_print_id;
36804 extern int atapi_passthru16;
36805 extern int libata_fua;
36806 extern int libata_noacpi;
36807diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36808index a9b0c82..207d97d 100644
36809--- a/drivers/ata/pata_arasan_cf.c
36810+++ b/drivers/ata/pata_arasan_cf.c
36811@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36812 /* Handle platform specific quirks */
36813 if (quirk) {
36814 if (quirk & CF_BROKEN_PIO) {
36815- ap->ops->set_piomode = NULL;
36816+ pax_open_kernel();
36817+ *(void **)&ap->ops->set_piomode = NULL;
36818+ pax_close_kernel();
36819 ap->pio_mask = 0;
36820 }
36821 if (quirk & CF_BROKEN_MWDMA)
36822diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36823index f9b983a..887b9d8 100644
36824--- a/drivers/atm/adummy.c
36825+++ b/drivers/atm/adummy.c
36826@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36827 vcc->pop(vcc, skb);
36828 else
36829 dev_kfree_skb_any(skb);
36830- atomic_inc(&vcc->stats->tx);
36831+ atomic_inc_unchecked(&vcc->stats->tx);
36832
36833 return 0;
36834 }
36835diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36836index f1a9198..f466a4a 100644
36837--- a/drivers/atm/ambassador.c
36838+++ b/drivers/atm/ambassador.c
36839@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36840 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36841
36842 // VC layer stats
36843- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36844+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36845
36846 // free the descriptor
36847 kfree (tx_descr);
36848@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36849 dump_skb ("<<<", vc, skb);
36850
36851 // VC layer stats
36852- atomic_inc(&atm_vcc->stats->rx);
36853+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36854 __net_timestamp(skb);
36855 // end of our responsibility
36856 atm_vcc->push (atm_vcc, skb);
36857@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36858 } else {
36859 PRINTK (KERN_INFO, "dropped over-size frame");
36860 // should we count this?
36861- atomic_inc(&atm_vcc->stats->rx_drop);
36862+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36863 }
36864
36865 } else {
36866@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36867 }
36868
36869 if (check_area (skb->data, skb->len)) {
36870- atomic_inc(&atm_vcc->stats->tx_err);
36871+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36872 return -ENOMEM; // ?
36873 }
36874
36875diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36876index 480fa6f..947067c 100644
36877--- a/drivers/atm/atmtcp.c
36878+++ b/drivers/atm/atmtcp.c
36879@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36880 if (vcc->pop) vcc->pop(vcc,skb);
36881 else dev_kfree_skb(skb);
36882 if (dev_data) return 0;
36883- atomic_inc(&vcc->stats->tx_err);
36884+ atomic_inc_unchecked(&vcc->stats->tx_err);
36885 return -ENOLINK;
36886 }
36887 size = skb->len+sizeof(struct atmtcp_hdr);
36888@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36889 if (!new_skb) {
36890 if (vcc->pop) vcc->pop(vcc,skb);
36891 else dev_kfree_skb(skb);
36892- atomic_inc(&vcc->stats->tx_err);
36893+ atomic_inc_unchecked(&vcc->stats->tx_err);
36894 return -ENOBUFS;
36895 }
36896 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36897@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36898 if (vcc->pop) vcc->pop(vcc,skb);
36899 else dev_kfree_skb(skb);
36900 out_vcc->push(out_vcc,new_skb);
36901- atomic_inc(&vcc->stats->tx);
36902- atomic_inc(&out_vcc->stats->rx);
36903+ atomic_inc_unchecked(&vcc->stats->tx);
36904+ atomic_inc_unchecked(&out_vcc->stats->rx);
36905 return 0;
36906 }
36907
36908@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36909 read_unlock(&vcc_sklist_lock);
36910 if (!out_vcc) {
36911 result = -EUNATCH;
36912- atomic_inc(&vcc->stats->tx_err);
36913+ atomic_inc_unchecked(&vcc->stats->tx_err);
36914 goto done;
36915 }
36916 skb_pull(skb,sizeof(struct atmtcp_hdr));
36917@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36918 __net_timestamp(new_skb);
36919 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36920 out_vcc->push(out_vcc,new_skb);
36921- atomic_inc(&vcc->stats->tx);
36922- atomic_inc(&out_vcc->stats->rx);
36923+ atomic_inc_unchecked(&vcc->stats->tx);
36924+ atomic_inc_unchecked(&out_vcc->stats->rx);
36925 done:
36926 if (vcc->pop) vcc->pop(vcc,skb);
36927 else dev_kfree_skb(skb);
36928diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36929index 6339efd..2b441d5 100644
36930--- a/drivers/atm/eni.c
36931+++ b/drivers/atm/eni.c
36932@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36933 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36934 vcc->dev->number);
36935 length = 0;
36936- atomic_inc(&vcc->stats->rx_err);
36937+ atomic_inc_unchecked(&vcc->stats->rx_err);
36938 }
36939 else {
36940 length = ATM_CELL_SIZE-1; /* no HEC */
36941@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36942 size);
36943 }
36944 eff = length = 0;
36945- atomic_inc(&vcc->stats->rx_err);
36946+ atomic_inc_unchecked(&vcc->stats->rx_err);
36947 }
36948 else {
36949 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36950@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36951 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36952 vcc->dev->number,vcc->vci,length,size << 2,descr);
36953 length = eff = 0;
36954- atomic_inc(&vcc->stats->rx_err);
36955+ atomic_inc_unchecked(&vcc->stats->rx_err);
36956 }
36957 }
36958 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36959@@ -770,7 +770,7 @@ rx_dequeued++;
36960 vcc->push(vcc,skb);
36961 pushed++;
36962 }
36963- atomic_inc(&vcc->stats->rx);
36964+ atomic_inc_unchecked(&vcc->stats->rx);
36965 }
36966 wake_up(&eni_dev->rx_wait);
36967 }
36968@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36969 DMA_TO_DEVICE);
36970 if (vcc->pop) vcc->pop(vcc,skb);
36971 else dev_kfree_skb_irq(skb);
36972- atomic_inc(&vcc->stats->tx);
36973+ atomic_inc_unchecked(&vcc->stats->tx);
36974 wake_up(&eni_dev->tx_wait);
36975 dma_complete++;
36976 }
36977diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36978index 82f2ae0..f205c02 100644
36979--- a/drivers/atm/firestream.c
36980+++ b/drivers/atm/firestream.c
36981@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36982 }
36983 }
36984
36985- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36986+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36987
36988 fs_dprintk (FS_DEBUG_TXMEM, "i");
36989 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36990@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36991 #endif
36992 skb_put (skb, qe->p1 & 0xffff);
36993 ATM_SKB(skb)->vcc = atm_vcc;
36994- atomic_inc(&atm_vcc->stats->rx);
36995+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36996 __net_timestamp(skb);
36997 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36998 atm_vcc->push (atm_vcc, skb);
36999@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
37000 kfree (pe);
37001 }
37002 if (atm_vcc)
37003- atomic_inc(&atm_vcc->stats->rx_drop);
37004+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37005 break;
37006 case 0x1f: /* Reassembly abort: no buffers. */
37007 /* Silently increment error counter. */
37008 if (atm_vcc)
37009- atomic_inc(&atm_vcc->stats->rx_drop);
37010+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37011 break;
37012 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
37013 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
37014diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
37015index 75dde90..4309ead 100644
37016--- a/drivers/atm/fore200e.c
37017+++ b/drivers/atm/fore200e.c
37018@@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
37019 #endif
37020 /* check error condition */
37021 if (*entry->status & STATUS_ERROR)
37022- atomic_inc(&vcc->stats->tx_err);
37023+ atomic_inc_unchecked(&vcc->stats->tx_err);
37024 else
37025- atomic_inc(&vcc->stats->tx);
37026+ atomic_inc_unchecked(&vcc->stats->tx);
37027 }
37028 }
37029
37030@@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37031 if (skb == NULL) {
37032 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
37033
37034- atomic_inc(&vcc->stats->rx_drop);
37035+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37036 return -ENOMEM;
37037 }
37038
37039@@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37040
37041 dev_kfree_skb_any(skb);
37042
37043- atomic_inc(&vcc->stats->rx_drop);
37044+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37045 return -ENOMEM;
37046 }
37047
37048 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37049
37050 vcc->push(vcc, skb);
37051- atomic_inc(&vcc->stats->rx);
37052+ atomic_inc_unchecked(&vcc->stats->rx);
37053
37054 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37055
37056@@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
37057 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
37058 fore200e->atm_dev->number,
37059 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
37060- atomic_inc(&vcc->stats->rx_err);
37061+ atomic_inc_unchecked(&vcc->stats->rx_err);
37062 }
37063 }
37064
37065@@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
37066 goto retry_here;
37067 }
37068
37069- atomic_inc(&vcc->stats->tx_err);
37070+ atomic_inc_unchecked(&vcc->stats->tx_err);
37071
37072 fore200e->tx_sat++;
37073 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
37074diff --git a/drivers/atm/he.c b/drivers/atm/he.c
37075index 93dca2e..c5daa69 100644
37076--- a/drivers/atm/he.c
37077+++ b/drivers/atm/he.c
37078@@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37079
37080 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
37081 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
37082- atomic_inc(&vcc->stats->rx_drop);
37083+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37084 goto return_host_buffers;
37085 }
37086
37087@@ -1719,7 +1719,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37088 RBRQ_LEN_ERR(he_dev->rbrq_head)
37089 ? "LEN_ERR" : "",
37090 vcc->vpi, vcc->vci);
37091- atomic_inc(&vcc->stats->rx_err);
37092+ atomic_inc_unchecked(&vcc->stats->rx_err);
37093 goto return_host_buffers;
37094 }
37095
37096@@ -1771,7 +1771,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37097 vcc->push(vcc, skb);
37098 spin_lock(&he_dev->global_lock);
37099
37100- atomic_inc(&vcc->stats->rx);
37101+ atomic_inc_unchecked(&vcc->stats->rx);
37102
37103 return_host_buffers:
37104 ++pdus_assembled;
37105@@ -2097,7 +2097,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37106 tpd->vcc->pop(tpd->vcc, tpd->skb);
37107 else
37108 dev_kfree_skb_any(tpd->skb);
37109- atomic_inc(&tpd->vcc->stats->tx_err);
37110+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37111 }
37112 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37113 return;
37114@@ -2509,7 +2509,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37115 vcc->pop(vcc, skb);
37116 else
37117 dev_kfree_skb_any(skb);
37118- atomic_inc(&vcc->stats->tx_err);
37119+ atomic_inc_unchecked(&vcc->stats->tx_err);
37120 return -EINVAL;
37121 }
37122
37123@@ -2520,7 +2520,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37124 vcc->pop(vcc, skb);
37125 else
37126 dev_kfree_skb_any(skb);
37127- atomic_inc(&vcc->stats->tx_err);
37128+ atomic_inc_unchecked(&vcc->stats->tx_err);
37129 return -EINVAL;
37130 }
37131 #endif
37132@@ -2532,7 +2532,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37133 vcc->pop(vcc, skb);
37134 else
37135 dev_kfree_skb_any(skb);
37136- atomic_inc(&vcc->stats->tx_err);
37137+ atomic_inc_unchecked(&vcc->stats->tx_err);
37138 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37139 return -ENOMEM;
37140 }
37141@@ -2574,7 +2574,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37142 vcc->pop(vcc, skb);
37143 else
37144 dev_kfree_skb_any(skb);
37145- atomic_inc(&vcc->stats->tx_err);
37146+ atomic_inc_unchecked(&vcc->stats->tx_err);
37147 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37148 return -ENOMEM;
37149 }
37150@@ -2605,7 +2605,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37151 __enqueue_tpd(he_dev, tpd, cid);
37152 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37153
37154- atomic_inc(&vcc->stats->tx);
37155+ atomic_inc_unchecked(&vcc->stats->tx);
37156
37157 return 0;
37158 }
37159diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37160index 527bbd5..96570c8 100644
37161--- a/drivers/atm/horizon.c
37162+++ b/drivers/atm/horizon.c
37163@@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37164 {
37165 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37166 // VC layer stats
37167- atomic_inc(&vcc->stats->rx);
37168+ atomic_inc_unchecked(&vcc->stats->rx);
37169 __net_timestamp(skb);
37170 // end of our responsibility
37171 vcc->push (vcc, skb);
37172@@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37173 dev->tx_iovec = NULL;
37174
37175 // VC layer stats
37176- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37177+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37178
37179 // free the skb
37180 hrz_kfree_skb (skb);
37181diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37182index 074616b..d6b3d5f 100644
37183--- a/drivers/atm/idt77252.c
37184+++ b/drivers/atm/idt77252.c
37185@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37186 else
37187 dev_kfree_skb(skb);
37188
37189- atomic_inc(&vcc->stats->tx);
37190+ atomic_inc_unchecked(&vcc->stats->tx);
37191 }
37192
37193 atomic_dec(&scq->used);
37194@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37195 if ((sb = dev_alloc_skb(64)) == NULL) {
37196 printk("%s: Can't allocate buffers for aal0.\n",
37197 card->name);
37198- atomic_add(i, &vcc->stats->rx_drop);
37199+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37200 break;
37201 }
37202 if (!atm_charge(vcc, sb->truesize)) {
37203 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37204 card->name);
37205- atomic_add(i - 1, &vcc->stats->rx_drop);
37206+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37207 dev_kfree_skb(sb);
37208 break;
37209 }
37210@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37211 ATM_SKB(sb)->vcc = vcc;
37212 __net_timestamp(sb);
37213 vcc->push(vcc, sb);
37214- atomic_inc(&vcc->stats->rx);
37215+ atomic_inc_unchecked(&vcc->stats->rx);
37216
37217 cell += ATM_CELL_PAYLOAD;
37218 }
37219@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37220 "(CDC: %08x)\n",
37221 card->name, len, rpp->len, readl(SAR_REG_CDC));
37222 recycle_rx_pool_skb(card, rpp);
37223- atomic_inc(&vcc->stats->rx_err);
37224+ atomic_inc_unchecked(&vcc->stats->rx_err);
37225 return;
37226 }
37227 if (stat & SAR_RSQE_CRC) {
37228 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37229 recycle_rx_pool_skb(card, rpp);
37230- atomic_inc(&vcc->stats->rx_err);
37231+ atomic_inc_unchecked(&vcc->stats->rx_err);
37232 return;
37233 }
37234 if (skb_queue_len(&rpp->queue) > 1) {
37235@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37236 RXPRINTK("%s: Can't alloc RX skb.\n",
37237 card->name);
37238 recycle_rx_pool_skb(card, rpp);
37239- atomic_inc(&vcc->stats->rx_err);
37240+ atomic_inc_unchecked(&vcc->stats->rx_err);
37241 return;
37242 }
37243 if (!atm_charge(vcc, skb->truesize)) {
37244@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37245 __net_timestamp(skb);
37246
37247 vcc->push(vcc, skb);
37248- atomic_inc(&vcc->stats->rx);
37249+ atomic_inc_unchecked(&vcc->stats->rx);
37250
37251 return;
37252 }
37253@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37254 __net_timestamp(skb);
37255
37256 vcc->push(vcc, skb);
37257- atomic_inc(&vcc->stats->rx);
37258+ atomic_inc_unchecked(&vcc->stats->rx);
37259
37260 if (skb->truesize > SAR_FB_SIZE_3)
37261 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37262@@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37263 if (vcc->qos.aal != ATM_AAL0) {
37264 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37265 card->name, vpi, vci);
37266- atomic_inc(&vcc->stats->rx_drop);
37267+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37268 goto drop;
37269 }
37270
37271 if ((sb = dev_alloc_skb(64)) == NULL) {
37272 printk("%s: Can't allocate buffers for AAL0.\n",
37273 card->name);
37274- atomic_inc(&vcc->stats->rx_err);
37275+ atomic_inc_unchecked(&vcc->stats->rx_err);
37276 goto drop;
37277 }
37278
37279@@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37280 ATM_SKB(sb)->vcc = vcc;
37281 __net_timestamp(sb);
37282 vcc->push(vcc, sb);
37283- atomic_inc(&vcc->stats->rx);
37284+ atomic_inc_unchecked(&vcc->stats->rx);
37285
37286 drop:
37287 skb_pull(queue, 64);
37288@@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37289
37290 if (vc == NULL) {
37291 printk("%s: NULL connection in send().\n", card->name);
37292- atomic_inc(&vcc->stats->tx_err);
37293+ atomic_inc_unchecked(&vcc->stats->tx_err);
37294 dev_kfree_skb(skb);
37295 return -EINVAL;
37296 }
37297 if (!test_bit(VCF_TX, &vc->flags)) {
37298 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37299- atomic_inc(&vcc->stats->tx_err);
37300+ atomic_inc_unchecked(&vcc->stats->tx_err);
37301 dev_kfree_skb(skb);
37302 return -EINVAL;
37303 }
37304@@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37305 break;
37306 default:
37307 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37308- atomic_inc(&vcc->stats->tx_err);
37309+ atomic_inc_unchecked(&vcc->stats->tx_err);
37310 dev_kfree_skb(skb);
37311 return -EINVAL;
37312 }
37313
37314 if (skb_shinfo(skb)->nr_frags != 0) {
37315 printk("%s: No scatter-gather yet.\n", card->name);
37316- atomic_inc(&vcc->stats->tx_err);
37317+ atomic_inc_unchecked(&vcc->stats->tx_err);
37318 dev_kfree_skb(skb);
37319 return -EINVAL;
37320 }
37321@@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37322
37323 err = queue_skb(card, vc, skb, oam);
37324 if (err) {
37325- atomic_inc(&vcc->stats->tx_err);
37326+ atomic_inc_unchecked(&vcc->stats->tx_err);
37327 dev_kfree_skb(skb);
37328 return err;
37329 }
37330@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37331 skb = dev_alloc_skb(64);
37332 if (!skb) {
37333 printk("%s: Out of memory in send_oam().\n", card->name);
37334- atomic_inc(&vcc->stats->tx_err);
37335+ atomic_inc_unchecked(&vcc->stats->tx_err);
37336 return -ENOMEM;
37337 }
37338 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37339diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37340index 924f8e2..3375a3e 100644
37341--- a/drivers/atm/iphase.c
37342+++ b/drivers/atm/iphase.c
37343@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37344 status = (u_short) (buf_desc_ptr->desc_mode);
37345 if (status & (RX_CER | RX_PTE | RX_OFL))
37346 {
37347- atomic_inc(&vcc->stats->rx_err);
37348+ atomic_inc_unchecked(&vcc->stats->rx_err);
37349 IF_ERR(printk("IA: bad packet, dropping it");)
37350 if (status & RX_CER) {
37351 IF_ERR(printk(" cause: packet CRC error\n");)
37352@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37353 len = dma_addr - buf_addr;
37354 if (len > iadev->rx_buf_sz) {
37355 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37356- atomic_inc(&vcc->stats->rx_err);
37357+ atomic_inc_unchecked(&vcc->stats->rx_err);
37358 goto out_free_desc;
37359 }
37360
37361@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37362 ia_vcc = INPH_IA_VCC(vcc);
37363 if (ia_vcc == NULL)
37364 {
37365- atomic_inc(&vcc->stats->rx_err);
37366+ atomic_inc_unchecked(&vcc->stats->rx_err);
37367 atm_return(vcc, skb->truesize);
37368 dev_kfree_skb_any(skb);
37369 goto INCR_DLE;
37370@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37371 if ((length > iadev->rx_buf_sz) || (length >
37372 (skb->len - sizeof(struct cpcs_trailer))))
37373 {
37374- atomic_inc(&vcc->stats->rx_err);
37375+ atomic_inc_unchecked(&vcc->stats->rx_err);
37376 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37377 length, skb->len);)
37378 atm_return(vcc, skb->truesize);
37379@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37380
37381 IF_RX(printk("rx_dle_intr: skb push");)
37382 vcc->push(vcc,skb);
37383- atomic_inc(&vcc->stats->rx);
37384+ atomic_inc_unchecked(&vcc->stats->rx);
37385 iadev->rx_pkt_cnt++;
37386 }
37387 INCR_DLE:
37388@@ -2828,15 +2828,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37389 {
37390 struct k_sonet_stats *stats;
37391 stats = &PRIV(_ia_dev[board])->sonet_stats;
37392- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37393- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37394- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37395- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37396- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37397- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37398- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37399- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37400- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37401+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37402+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37403+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37404+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37405+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37406+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37407+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37408+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37409+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37410 }
37411 ia_cmds.status = 0;
37412 break;
37413@@ -2941,7 +2941,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37414 if ((desc == 0) || (desc > iadev->num_tx_desc))
37415 {
37416 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37417- atomic_inc(&vcc->stats->tx);
37418+ atomic_inc_unchecked(&vcc->stats->tx);
37419 if (vcc->pop)
37420 vcc->pop(vcc, skb);
37421 else
37422@@ -3046,14 +3046,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37423 ATM_DESC(skb) = vcc->vci;
37424 skb_queue_tail(&iadev->tx_dma_q, skb);
37425
37426- atomic_inc(&vcc->stats->tx);
37427+ atomic_inc_unchecked(&vcc->stats->tx);
37428 iadev->tx_pkt_cnt++;
37429 /* Increment transaction counter */
37430 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37431
37432 #if 0
37433 /* add flow control logic */
37434- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37435+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37436 if (iavcc->vc_desc_cnt > 10) {
37437 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37438 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37439diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37440index ce43ae3..969de38 100644
37441--- a/drivers/atm/lanai.c
37442+++ b/drivers/atm/lanai.c
37443@@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37444 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37445 lanai_endtx(lanai, lvcc);
37446 lanai_free_skb(lvcc->tx.atmvcc, skb);
37447- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37448+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37449 }
37450
37451 /* Try to fill the buffer - don't call unless there is backlog */
37452@@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37453 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37454 __net_timestamp(skb);
37455 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37456- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37457+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37458 out:
37459 lvcc->rx.buf.ptr = end;
37460 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37461@@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37462 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37463 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37464 lanai->stats.service_rxnotaal5++;
37465- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37466+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37467 return 0;
37468 }
37469 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37470@@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37471 int bytes;
37472 read_unlock(&vcc_sklist_lock);
37473 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37474- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37475+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37476 lvcc->stats.x.aal5.service_trash++;
37477 bytes = (SERVICE_GET_END(s) * 16) -
37478 (((unsigned long) lvcc->rx.buf.ptr) -
37479@@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37480 }
37481 if (s & SERVICE_STREAM) {
37482 read_unlock(&vcc_sklist_lock);
37483- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37484+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37485 lvcc->stats.x.aal5.service_stream++;
37486 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37487 "PDU on VCI %d!\n", lanai->number, vci);
37488@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37489 return 0;
37490 }
37491 DPRINTK("got rx crc error on vci %d\n", vci);
37492- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37493+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37494 lvcc->stats.x.aal5.service_rxcrc++;
37495 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37496 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37497diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37498index b7e1cc0..eb336bfe 100644
37499--- a/drivers/atm/nicstar.c
37500+++ b/drivers/atm/nicstar.c
37501@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37502 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37503 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37504 card->index);
37505- atomic_inc(&vcc->stats->tx_err);
37506+ atomic_inc_unchecked(&vcc->stats->tx_err);
37507 dev_kfree_skb_any(skb);
37508 return -EINVAL;
37509 }
37510@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37511 if (!vc->tx) {
37512 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37513 card->index);
37514- atomic_inc(&vcc->stats->tx_err);
37515+ atomic_inc_unchecked(&vcc->stats->tx_err);
37516 dev_kfree_skb_any(skb);
37517 return -EINVAL;
37518 }
37519@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37520 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37521 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37522 card->index);
37523- atomic_inc(&vcc->stats->tx_err);
37524+ atomic_inc_unchecked(&vcc->stats->tx_err);
37525 dev_kfree_skb_any(skb);
37526 return -EINVAL;
37527 }
37528
37529 if (skb_shinfo(skb)->nr_frags != 0) {
37530 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37531- atomic_inc(&vcc->stats->tx_err);
37532+ atomic_inc_unchecked(&vcc->stats->tx_err);
37533 dev_kfree_skb_any(skb);
37534 return -EINVAL;
37535 }
37536@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37537 }
37538
37539 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37540- atomic_inc(&vcc->stats->tx_err);
37541+ atomic_inc_unchecked(&vcc->stats->tx_err);
37542 dev_kfree_skb_any(skb);
37543 return -EIO;
37544 }
37545- atomic_inc(&vcc->stats->tx);
37546+ atomic_inc_unchecked(&vcc->stats->tx);
37547
37548 return 0;
37549 }
37550@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37551 printk
37552 ("nicstar%d: Can't allocate buffers for aal0.\n",
37553 card->index);
37554- atomic_add(i, &vcc->stats->rx_drop);
37555+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37556 break;
37557 }
37558 if (!atm_charge(vcc, sb->truesize)) {
37559 RXPRINTK
37560 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37561 card->index);
37562- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37563+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37564 dev_kfree_skb_any(sb);
37565 break;
37566 }
37567@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37568 ATM_SKB(sb)->vcc = vcc;
37569 __net_timestamp(sb);
37570 vcc->push(vcc, sb);
37571- atomic_inc(&vcc->stats->rx);
37572+ atomic_inc_unchecked(&vcc->stats->rx);
37573 cell += ATM_CELL_PAYLOAD;
37574 }
37575
37576@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37577 if (iovb == NULL) {
37578 printk("nicstar%d: Out of iovec buffers.\n",
37579 card->index);
37580- atomic_inc(&vcc->stats->rx_drop);
37581+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37582 recycle_rx_buf(card, skb);
37583 return;
37584 }
37585@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37586 small or large buffer itself. */
37587 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37588 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37589- atomic_inc(&vcc->stats->rx_err);
37590+ atomic_inc_unchecked(&vcc->stats->rx_err);
37591 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37592 NS_MAX_IOVECS);
37593 NS_PRV_IOVCNT(iovb) = 0;
37594@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37595 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37596 card->index);
37597 which_list(card, skb);
37598- atomic_inc(&vcc->stats->rx_err);
37599+ atomic_inc_unchecked(&vcc->stats->rx_err);
37600 recycle_rx_buf(card, skb);
37601 vc->rx_iov = NULL;
37602 recycle_iov_buf(card, iovb);
37603@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37604 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37605 card->index);
37606 which_list(card, skb);
37607- atomic_inc(&vcc->stats->rx_err);
37608+ atomic_inc_unchecked(&vcc->stats->rx_err);
37609 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37610 NS_PRV_IOVCNT(iovb));
37611 vc->rx_iov = NULL;
37612@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37613 printk(" - PDU size mismatch.\n");
37614 else
37615 printk(".\n");
37616- atomic_inc(&vcc->stats->rx_err);
37617+ atomic_inc_unchecked(&vcc->stats->rx_err);
37618 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37619 NS_PRV_IOVCNT(iovb));
37620 vc->rx_iov = NULL;
37621@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37622 /* skb points to a small buffer */
37623 if (!atm_charge(vcc, skb->truesize)) {
37624 push_rxbufs(card, skb);
37625- atomic_inc(&vcc->stats->rx_drop);
37626+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37627 } else {
37628 skb_put(skb, len);
37629 dequeue_sm_buf(card, skb);
37630@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37631 ATM_SKB(skb)->vcc = vcc;
37632 __net_timestamp(skb);
37633 vcc->push(vcc, skb);
37634- atomic_inc(&vcc->stats->rx);
37635+ atomic_inc_unchecked(&vcc->stats->rx);
37636 }
37637 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37638 struct sk_buff *sb;
37639@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37640 if (len <= NS_SMBUFSIZE) {
37641 if (!atm_charge(vcc, sb->truesize)) {
37642 push_rxbufs(card, sb);
37643- atomic_inc(&vcc->stats->rx_drop);
37644+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37645 } else {
37646 skb_put(sb, len);
37647 dequeue_sm_buf(card, sb);
37648@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37649 ATM_SKB(sb)->vcc = vcc;
37650 __net_timestamp(sb);
37651 vcc->push(vcc, sb);
37652- atomic_inc(&vcc->stats->rx);
37653+ atomic_inc_unchecked(&vcc->stats->rx);
37654 }
37655
37656 push_rxbufs(card, skb);
37657@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37658
37659 if (!atm_charge(vcc, skb->truesize)) {
37660 push_rxbufs(card, skb);
37661- atomic_inc(&vcc->stats->rx_drop);
37662+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37663 } else {
37664 dequeue_lg_buf(card, skb);
37665 #ifdef NS_USE_DESTRUCTORS
37666@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37667 ATM_SKB(skb)->vcc = vcc;
37668 __net_timestamp(skb);
37669 vcc->push(vcc, skb);
37670- atomic_inc(&vcc->stats->rx);
37671+ atomic_inc_unchecked(&vcc->stats->rx);
37672 }
37673
37674 push_rxbufs(card, sb);
37675@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37676 printk
37677 ("nicstar%d: Out of huge buffers.\n",
37678 card->index);
37679- atomic_inc(&vcc->stats->rx_drop);
37680+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37681 recycle_iovec_rx_bufs(card,
37682 (struct iovec *)
37683 iovb->data,
37684@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37685 card->hbpool.count++;
37686 } else
37687 dev_kfree_skb_any(hb);
37688- atomic_inc(&vcc->stats->rx_drop);
37689+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37690 } else {
37691 /* Copy the small buffer to the huge buffer */
37692 sb = (struct sk_buff *)iov->iov_base;
37693@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37694 #endif /* NS_USE_DESTRUCTORS */
37695 __net_timestamp(hb);
37696 vcc->push(vcc, hb);
37697- atomic_inc(&vcc->stats->rx);
37698+ atomic_inc_unchecked(&vcc->stats->rx);
37699 }
37700 }
37701
37702diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37703index 74e18b0..f16afa0 100644
37704--- a/drivers/atm/solos-pci.c
37705+++ b/drivers/atm/solos-pci.c
37706@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37707 }
37708 atm_charge(vcc, skb->truesize);
37709 vcc->push(vcc, skb);
37710- atomic_inc(&vcc->stats->rx);
37711+ atomic_inc_unchecked(&vcc->stats->rx);
37712 break;
37713
37714 case PKT_STATUS:
37715@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37716 vcc = SKB_CB(oldskb)->vcc;
37717
37718 if (vcc) {
37719- atomic_inc(&vcc->stats->tx);
37720+ atomic_inc_unchecked(&vcc->stats->tx);
37721 solos_pop(vcc, oldskb);
37722 } else {
37723 dev_kfree_skb_irq(oldskb);
37724diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37725index 0215934..ce9f5b1 100644
37726--- a/drivers/atm/suni.c
37727+++ b/drivers/atm/suni.c
37728@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37729
37730
37731 #define ADD_LIMITED(s,v) \
37732- atomic_add((v),&stats->s); \
37733- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37734+ atomic_add_unchecked((v),&stats->s); \
37735+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37736
37737
37738 static void suni_hz(unsigned long from_timer)
37739diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37740index 5120a96..e2572bd 100644
37741--- a/drivers/atm/uPD98402.c
37742+++ b/drivers/atm/uPD98402.c
37743@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37744 struct sonet_stats tmp;
37745 int error = 0;
37746
37747- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37748+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37749 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37750 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37751 if (zero && !error) {
37752@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37753
37754
37755 #define ADD_LIMITED(s,v) \
37756- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37757- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37758- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37759+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37760+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37761+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37762
37763
37764 static void stat_event(struct atm_dev *dev)
37765@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37766 if (reason & uPD98402_INT_PFM) stat_event(dev);
37767 if (reason & uPD98402_INT_PCO) {
37768 (void) GET(PCOCR); /* clear interrupt cause */
37769- atomic_add(GET(HECCT),
37770+ atomic_add_unchecked(GET(HECCT),
37771 &PRIV(dev)->sonet_stats.uncorr_hcs);
37772 }
37773 if ((reason & uPD98402_INT_RFO) &&
37774@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37775 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37776 uPD98402_INT_LOS),PIMR); /* enable them */
37777 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37778- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37779- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37780- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37781+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37782+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37783+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37784 return 0;
37785 }
37786
37787diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37788index cecfb94..87009ec 100644
37789--- a/drivers/atm/zatm.c
37790+++ b/drivers/atm/zatm.c
37791@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37792 }
37793 if (!size) {
37794 dev_kfree_skb_irq(skb);
37795- if (vcc) atomic_inc(&vcc->stats->rx_err);
37796+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37797 continue;
37798 }
37799 if (!atm_charge(vcc,skb->truesize)) {
37800@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37801 skb->len = size;
37802 ATM_SKB(skb)->vcc = vcc;
37803 vcc->push(vcc,skb);
37804- atomic_inc(&vcc->stats->rx);
37805+ atomic_inc_unchecked(&vcc->stats->rx);
37806 }
37807 zout(pos & 0xffff,MTA(mbx));
37808 #if 0 /* probably a stupid idea */
37809@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37810 skb_queue_head(&zatm_vcc->backlog,skb);
37811 break;
37812 }
37813- atomic_inc(&vcc->stats->tx);
37814+ atomic_inc_unchecked(&vcc->stats->tx);
37815 wake_up(&zatm_vcc->tx_wait);
37816 }
37817
37818diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37819index 79bc203..fa3945b 100644
37820--- a/drivers/base/bus.c
37821+++ b/drivers/base/bus.c
37822@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37823 return -EINVAL;
37824
37825 mutex_lock(&subsys->p->mutex);
37826- list_add_tail(&sif->node, &subsys->p->interfaces);
37827+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37828 if (sif->add_dev) {
37829 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37830 while ((dev = subsys_dev_iter_next(&iter)))
37831@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37832 subsys = sif->subsys;
37833
37834 mutex_lock(&subsys->p->mutex);
37835- list_del_init(&sif->node);
37836+ pax_list_del_init((struct list_head *)&sif->node);
37837 if (sif->remove_dev) {
37838 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37839 while ((dev = subsys_dev_iter_next(&iter)))
37840diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37841index 25798db..15f130e 100644
37842--- a/drivers/base/devtmpfs.c
37843+++ b/drivers/base/devtmpfs.c
37844@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37845 if (!thread)
37846 return 0;
37847
37848- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37849+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37850 if (err)
37851 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37852 else
37853@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37854 *err = sys_unshare(CLONE_NEWNS);
37855 if (*err)
37856 goto out;
37857- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37858+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37859 if (*err)
37860 goto out;
37861- sys_chdir("/.."); /* will traverse into overmounted root */
37862- sys_chroot(".");
37863+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37864+ sys_chroot((char __force_user *)".");
37865 complete(&setup_done);
37866 while (1) {
37867 spin_lock(&req_lock);
37868diff --git a/drivers/base/node.c b/drivers/base/node.c
37869index 36fabe43..8cfc112 100644
37870--- a/drivers/base/node.c
37871+++ b/drivers/base/node.c
37872@@ -615,7 +615,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37873 struct node_attr {
37874 struct device_attribute attr;
37875 enum node_states state;
37876-};
37877+} __do_const;
37878
37879 static ssize_t show_node_state(struct device *dev,
37880 struct device_attribute *attr, char *buf)
37881diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37882index 45937f8..b9a342e 100644
37883--- a/drivers/base/power/domain.c
37884+++ b/drivers/base/power/domain.c
37885@@ -1698,7 +1698,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37886 {
37887 struct cpuidle_driver *cpuidle_drv;
37888 struct gpd_cpuidle_data *cpuidle_data;
37889- struct cpuidle_state *idle_state;
37890+ cpuidle_state_no_const *idle_state;
37891 int ret = 0;
37892
37893 if (IS_ERR_OR_NULL(genpd) || state < 0)
37894@@ -1766,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37895 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37896 {
37897 struct gpd_cpuidle_data *cpuidle_data;
37898- struct cpuidle_state *idle_state;
37899+ cpuidle_state_no_const *idle_state;
37900 int ret = 0;
37901
37902 if (IS_ERR_OR_NULL(genpd))
37903@@ -2195,7 +2195,10 @@ int genpd_dev_pm_attach(struct device *dev)
37904 return ret;
37905 }
37906
37907- dev->pm_domain->detach = genpd_dev_pm_detach;
37908+ pax_open_kernel();
37909+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37910+ pax_close_kernel();
37911+
37912 pm_genpd_poweron(pd);
37913
37914 return 0;
37915diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37916index d2be3f9..0a3167a 100644
37917--- a/drivers/base/power/sysfs.c
37918+++ b/drivers/base/power/sysfs.c
37919@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37920 return -EIO;
37921 }
37922 }
37923- return sprintf(buf, p);
37924+ return sprintf(buf, "%s", p);
37925 }
37926
37927 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
37928diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37929index aab7158..b172db2 100644
37930--- a/drivers/base/power/wakeup.c
37931+++ b/drivers/base/power/wakeup.c
37932@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37933 * They need to be modified together atomically, so it's better to use one
37934 * atomic variable to hold them both.
37935 */
37936-static atomic_t combined_event_count = ATOMIC_INIT(0);
37937+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37938
37939 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37940 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37941
37942 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37943 {
37944- unsigned int comb = atomic_read(&combined_event_count);
37945+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37946
37947 *cnt = (comb >> IN_PROGRESS_BITS);
37948 *inpr = comb & MAX_IN_PROGRESS;
37949@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37950 ws->start_prevent_time = ws->last_time;
37951
37952 /* Increment the counter of events in progress. */
37953- cec = atomic_inc_return(&combined_event_count);
37954+ cec = atomic_inc_return_unchecked(&combined_event_count);
37955
37956 trace_wakeup_source_activate(ws->name, cec);
37957 }
37958@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37959 * Increment the counter of registered wakeup events and decrement the
37960 * couter of wakeup events in progress simultaneously.
37961 */
37962- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37963+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37964 trace_wakeup_source_deactivate(ws->name, cec);
37965
37966 split_counters(&cnt, &inpr);
37967diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37968index 8d98a32..61d3165 100644
37969--- a/drivers/base/syscore.c
37970+++ b/drivers/base/syscore.c
37971@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37972 void register_syscore_ops(struct syscore_ops *ops)
37973 {
37974 mutex_lock(&syscore_ops_lock);
37975- list_add_tail(&ops->node, &syscore_ops_list);
37976+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37977 mutex_unlock(&syscore_ops_lock);
37978 }
37979 EXPORT_SYMBOL_GPL(register_syscore_ops);
37980@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37981 void unregister_syscore_ops(struct syscore_ops *ops)
37982 {
37983 mutex_lock(&syscore_ops_lock);
37984- list_del(&ops->node);
37985+ pax_list_del((struct list_head *)&ops->node);
37986 mutex_unlock(&syscore_ops_lock);
37987 }
37988 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37989diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37990index ff20f19..018f1da 100644
37991--- a/drivers/block/cciss.c
37992+++ b/drivers/block/cciss.c
37993@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37994 while (!list_empty(&h->reqQ)) {
37995 c = list_entry(h->reqQ.next, CommandList_struct, list);
37996 /* can't do anything if fifo is full */
37997- if ((h->access.fifo_full(h))) {
37998+ if ((h->access->fifo_full(h))) {
37999 dev_warn(&h->pdev->dev, "fifo full\n");
38000 break;
38001 }
38002@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
38003 h->Qdepth--;
38004
38005 /* Tell the controller execute command */
38006- h->access.submit_command(h, c);
38007+ h->access->submit_command(h, c);
38008
38009 /* Put job onto the completed Q */
38010 addQ(&h->cmpQ, c);
38011@@ -3444,17 +3444,17 @@ startio:
38012
38013 static inline unsigned long get_next_completion(ctlr_info_t *h)
38014 {
38015- return h->access.command_completed(h);
38016+ return h->access->command_completed(h);
38017 }
38018
38019 static inline int interrupt_pending(ctlr_info_t *h)
38020 {
38021- return h->access.intr_pending(h);
38022+ return h->access->intr_pending(h);
38023 }
38024
38025 static inline long interrupt_not_for_us(ctlr_info_t *h)
38026 {
38027- return ((h->access.intr_pending(h) == 0) ||
38028+ return ((h->access->intr_pending(h) == 0) ||
38029 (h->interrupts_enabled == 0));
38030 }
38031
38032@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
38033 u32 a;
38034
38035 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38036- return h->access.command_completed(h);
38037+ return h->access->command_completed(h);
38038
38039 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
38040 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
38041@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
38042 trans_support & CFGTBL_Trans_use_short_tags);
38043
38044 /* Change the access methods to the performant access methods */
38045- h->access = SA5_performant_access;
38046+ h->access = &SA5_performant_access;
38047 h->transMethod = CFGTBL_Trans_Performant;
38048
38049 return;
38050@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
38051 if (prod_index < 0)
38052 return -ENODEV;
38053 h->product_name = products[prod_index].product_name;
38054- h->access = *(products[prod_index].access);
38055+ h->access = products[prod_index].access;
38056
38057 if (cciss_board_disabled(h)) {
38058 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
38059@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
38060 }
38061
38062 /* make sure the board interrupts are off */
38063- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38064+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38065 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
38066 if (rc)
38067 goto clean2;
38068@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
38069 * fake ones to scoop up any residual completions.
38070 */
38071 spin_lock_irqsave(&h->lock, flags);
38072- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38073+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38074 spin_unlock_irqrestore(&h->lock, flags);
38075 free_irq(h->intr[h->intr_mode], h);
38076 rc = cciss_request_irq(h, cciss_msix_discard_completions,
38077@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
38078 dev_info(&h->pdev->dev, "Board READY.\n");
38079 dev_info(&h->pdev->dev,
38080 "Waiting for stale completions to drain.\n");
38081- h->access.set_intr_mask(h, CCISS_INTR_ON);
38082+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38083 msleep(10000);
38084- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38085+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38086
38087 rc = controller_reset_failed(h->cfgtable);
38088 if (rc)
38089@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
38090 cciss_scsi_setup(h);
38091
38092 /* Turn the interrupts on so we can service requests */
38093- h->access.set_intr_mask(h, CCISS_INTR_ON);
38094+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38095
38096 /* Get the firmware version */
38097 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38098@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38099 kfree(flush_buf);
38100 if (return_code != IO_OK)
38101 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38102- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38103+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38104 free_irq(h->intr[h->intr_mode], h);
38105 }
38106
38107diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38108index 7fda30e..2f27946 100644
38109--- a/drivers/block/cciss.h
38110+++ b/drivers/block/cciss.h
38111@@ -101,7 +101,7 @@ struct ctlr_info
38112 /* information about each logical volume */
38113 drive_info_struct *drv[CISS_MAX_LUN];
38114
38115- struct access_method access;
38116+ struct access_method *access;
38117
38118 /* queue and queue Info */
38119 struct list_head reqQ;
38120@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38121 }
38122
38123 static struct access_method SA5_access = {
38124- SA5_submit_command,
38125- SA5_intr_mask,
38126- SA5_fifo_full,
38127- SA5_intr_pending,
38128- SA5_completed,
38129+ .submit_command = SA5_submit_command,
38130+ .set_intr_mask = SA5_intr_mask,
38131+ .fifo_full = SA5_fifo_full,
38132+ .intr_pending = SA5_intr_pending,
38133+ .command_completed = SA5_completed,
38134 };
38135
38136 static struct access_method SA5B_access = {
38137- SA5_submit_command,
38138- SA5B_intr_mask,
38139- SA5_fifo_full,
38140- SA5B_intr_pending,
38141- SA5_completed,
38142+ .submit_command = SA5_submit_command,
38143+ .set_intr_mask = SA5B_intr_mask,
38144+ .fifo_full = SA5_fifo_full,
38145+ .intr_pending = SA5B_intr_pending,
38146+ .command_completed = SA5_completed,
38147 };
38148
38149 static struct access_method SA5_performant_access = {
38150- SA5_submit_command,
38151- SA5_performant_intr_mask,
38152- SA5_fifo_full,
38153- SA5_performant_intr_pending,
38154- SA5_performant_completed,
38155+ .submit_command = SA5_submit_command,
38156+ .set_intr_mask = SA5_performant_intr_mask,
38157+ .fifo_full = SA5_fifo_full,
38158+ .intr_pending = SA5_performant_intr_pending,
38159+ .command_completed = SA5_performant_completed,
38160 };
38161
38162 struct board_type {
38163diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38164index 2b94403..fd6ad1f 100644
38165--- a/drivers/block/cpqarray.c
38166+++ b/drivers/block/cpqarray.c
38167@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38168 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38169 goto Enomem4;
38170 }
38171- hba[i]->access.set_intr_mask(hba[i], 0);
38172+ hba[i]->access->set_intr_mask(hba[i], 0);
38173 if (request_irq(hba[i]->intr, do_ida_intr,
38174 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38175 {
38176@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38177 add_timer(&hba[i]->timer);
38178
38179 /* Enable IRQ now that spinlock and rate limit timer are set up */
38180- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38181+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38182
38183 for(j=0; j<NWD; j++) {
38184 struct gendisk *disk = ida_gendisk[i][j];
38185@@ -694,7 +694,7 @@ DBGINFO(
38186 for(i=0; i<NR_PRODUCTS; i++) {
38187 if (board_id == products[i].board_id) {
38188 c->product_name = products[i].product_name;
38189- c->access = *(products[i].access);
38190+ c->access = products[i].access;
38191 break;
38192 }
38193 }
38194@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38195 hba[ctlr]->intr = intr;
38196 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38197 hba[ctlr]->product_name = products[j].product_name;
38198- hba[ctlr]->access = *(products[j].access);
38199+ hba[ctlr]->access = products[j].access;
38200 hba[ctlr]->ctlr = ctlr;
38201 hba[ctlr]->board_id = board_id;
38202 hba[ctlr]->pci_dev = NULL; /* not PCI */
38203@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38204
38205 while((c = h->reqQ) != NULL) {
38206 /* Can't do anything if we're busy */
38207- if (h->access.fifo_full(h) == 0)
38208+ if (h->access->fifo_full(h) == 0)
38209 return;
38210
38211 /* Get the first entry from the request Q */
38212@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38213 h->Qdepth--;
38214
38215 /* Tell the controller to do our bidding */
38216- h->access.submit_command(h, c);
38217+ h->access->submit_command(h, c);
38218
38219 /* Get onto the completion Q */
38220 addQ(&h->cmpQ, c);
38221@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38222 unsigned long flags;
38223 __u32 a,a1;
38224
38225- istat = h->access.intr_pending(h);
38226+ istat = h->access->intr_pending(h);
38227 /* Is this interrupt for us? */
38228 if (istat == 0)
38229 return IRQ_NONE;
38230@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38231 */
38232 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38233 if (istat & FIFO_NOT_EMPTY) {
38234- while((a = h->access.command_completed(h))) {
38235+ while((a = h->access->command_completed(h))) {
38236 a1 = a; a &= ~3;
38237 if ((c = h->cmpQ) == NULL)
38238 {
38239@@ -1448,11 +1448,11 @@ static int sendcmd(
38240 /*
38241 * Disable interrupt
38242 */
38243- info_p->access.set_intr_mask(info_p, 0);
38244+ info_p->access->set_intr_mask(info_p, 0);
38245 /* Make sure there is room in the command FIFO */
38246 /* Actually it should be completely empty at this time. */
38247 for (i = 200000; i > 0; i--) {
38248- temp = info_p->access.fifo_full(info_p);
38249+ temp = info_p->access->fifo_full(info_p);
38250 if (temp != 0) {
38251 break;
38252 }
38253@@ -1465,7 +1465,7 @@ DBG(
38254 /*
38255 * Send the cmd
38256 */
38257- info_p->access.submit_command(info_p, c);
38258+ info_p->access->submit_command(info_p, c);
38259 complete = pollcomplete(ctlr);
38260
38261 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38262@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38263 * we check the new geometry. Then turn interrupts back on when
38264 * we're done.
38265 */
38266- host->access.set_intr_mask(host, 0);
38267+ host->access->set_intr_mask(host, 0);
38268 getgeometry(ctlr);
38269- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38270+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38271
38272 for(i=0; i<NWD; i++) {
38273 struct gendisk *disk = ida_gendisk[ctlr][i];
38274@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38275 /* Wait (up to 2 seconds) for a command to complete */
38276
38277 for (i = 200000; i > 0; i--) {
38278- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38279+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38280 if (done == 0) {
38281 udelay(10); /* a short fixed delay */
38282 } else
38283diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38284index be73e9d..7fbf140 100644
38285--- a/drivers/block/cpqarray.h
38286+++ b/drivers/block/cpqarray.h
38287@@ -99,7 +99,7 @@ struct ctlr_info {
38288 drv_info_t drv[NWD];
38289 struct proc_dir_entry *proc;
38290
38291- struct access_method access;
38292+ struct access_method *access;
38293
38294 cmdlist_t *reqQ;
38295 cmdlist_t *cmpQ;
38296diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38297index 434c77d..6d3219a 100644
38298--- a/drivers/block/drbd/drbd_bitmap.c
38299+++ b/drivers/block/drbd/drbd_bitmap.c
38300@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38301 submit_bio(rw, bio);
38302 /* this should not count as user activity and cause the
38303 * resync to throttle -- see drbd_rs_should_slow_down(). */
38304- atomic_add(len >> 9, &device->rs_sect_ev);
38305+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38306 }
38307 }
38308
38309diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38310index b905e98..0812ed8 100644
38311--- a/drivers/block/drbd/drbd_int.h
38312+++ b/drivers/block/drbd/drbd_int.h
38313@@ -385,7 +385,7 @@ struct drbd_epoch {
38314 struct drbd_connection *connection;
38315 struct list_head list;
38316 unsigned int barrier_nr;
38317- atomic_t epoch_size; /* increased on every request added. */
38318+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38319 atomic_t active; /* increased on every req. added, and dec on every finished. */
38320 unsigned long flags;
38321 };
38322@@ -946,7 +946,7 @@ struct drbd_device {
38323 unsigned int al_tr_number;
38324 int al_tr_cycle;
38325 wait_queue_head_t seq_wait;
38326- atomic_t packet_seq;
38327+ atomic_unchecked_t packet_seq;
38328 unsigned int peer_seq;
38329 spinlock_t peer_seq_lock;
38330 unsigned long comm_bm_set; /* communicated number of set bits. */
38331@@ -955,8 +955,8 @@ struct drbd_device {
38332 struct mutex own_state_mutex;
38333 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38334 char congestion_reason; /* Why we where congested... */
38335- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38336- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38337+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38338+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38339 int rs_last_sect_ev; /* counter to compare with */
38340 int rs_last_events; /* counter of read or write "events" (unit sectors)
38341 * on the lower level device when we last looked. */
38342diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38343index 1fc8342..7e7742b 100644
38344--- a/drivers/block/drbd/drbd_main.c
38345+++ b/drivers/block/drbd/drbd_main.c
38346@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38347 p->sector = sector;
38348 p->block_id = block_id;
38349 p->blksize = blksize;
38350- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38351+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38352 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38353 }
38354
38355@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38356 return -EIO;
38357 p->sector = cpu_to_be64(req->i.sector);
38358 p->block_id = (unsigned long)req;
38359- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38360+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38361 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38362 if (device->state.conn >= C_SYNC_SOURCE &&
38363 device->state.conn <= C_PAUSED_SYNC_T)
38364@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38365 atomic_set(&device->unacked_cnt, 0);
38366 atomic_set(&device->local_cnt, 0);
38367 atomic_set(&device->pp_in_use_by_net, 0);
38368- atomic_set(&device->rs_sect_in, 0);
38369- atomic_set(&device->rs_sect_ev, 0);
38370+ atomic_set_unchecked(&device->rs_sect_in, 0);
38371+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38372 atomic_set(&device->ap_in_flight, 0);
38373 atomic_set(&device->md_io.in_use, 0);
38374
38375@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38376 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38377 struct drbd_resource *resource = connection->resource;
38378
38379- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38380- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38381+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38382+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38383 kfree(connection->current_epoch);
38384
38385 idr_destroy(&connection->peer_devices);
38386diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38387index 74df8cf..e41fc24 100644
38388--- a/drivers/block/drbd/drbd_nl.c
38389+++ b/drivers/block/drbd/drbd_nl.c
38390@@ -3637,13 +3637,13 @@ finish:
38391
38392 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38393 {
38394- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38395+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38396 struct sk_buff *msg;
38397 struct drbd_genlmsghdr *d_out;
38398 unsigned seq;
38399 int err = -ENOMEM;
38400
38401- seq = atomic_inc_return(&drbd_genl_seq);
38402+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38403 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38404 if (!msg)
38405 goto failed;
38406diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38407index cee2035..22f66bd 100644
38408--- a/drivers/block/drbd/drbd_receiver.c
38409+++ b/drivers/block/drbd/drbd_receiver.c
38410@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38411 struct drbd_device *device = peer_device->device;
38412 int err;
38413
38414- atomic_set(&device->packet_seq, 0);
38415+ atomic_set_unchecked(&device->packet_seq, 0);
38416 device->peer_seq = 0;
38417
38418 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38419@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38420 do {
38421 next_epoch = NULL;
38422
38423- epoch_size = atomic_read(&epoch->epoch_size);
38424+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38425
38426 switch (ev & ~EV_CLEANUP) {
38427 case EV_PUT:
38428@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38429 rv = FE_DESTROYED;
38430 } else {
38431 epoch->flags = 0;
38432- atomic_set(&epoch->epoch_size, 0);
38433+ atomic_set_unchecked(&epoch->epoch_size, 0);
38434 /* atomic_set(&epoch->active, 0); is already zero */
38435 if (rv == FE_STILL_LIVE)
38436 rv = FE_RECYCLED;
38437@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38438 conn_wait_active_ee_empty(connection);
38439 drbd_flush(connection);
38440
38441- if (atomic_read(&connection->current_epoch->epoch_size)) {
38442+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38443 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38444 if (epoch)
38445 break;
38446@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38447 }
38448
38449 epoch->flags = 0;
38450- atomic_set(&epoch->epoch_size, 0);
38451+ atomic_set_unchecked(&epoch->epoch_size, 0);
38452 atomic_set(&epoch->active, 0);
38453
38454 spin_lock(&connection->epoch_lock);
38455- if (atomic_read(&connection->current_epoch->epoch_size)) {
38456+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38457 list_add(&epoch->list, &connection->current_epoch->list);
38458 connection->current_epoch = epoch;
38459 connection->epochs++;
38460@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38461 list_add_tail(&peer_req->w.list, &device->sync_ee);
38462 spin_unlock_irq(&device->resource->req_lock);
38463
38464- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38465+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38466 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38467 return 0;
38468
38469@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38470 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38471 }
38472
38473- atomic_add(pi->size >> 9, &device->rs_sect_in);
38474+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38475
38476 return err;
38477 }
38478@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38479
38480 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38481 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38482- atomic_inc(&connection->current_epoch->epoch_size);
38483+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38484 err2 = drbd_drain_block(peer_device, pi->size);
38485 if (!err)
38486 err = err2;
38487@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38488
38489 spin_lock(&connection->epoch_lock);
38490 peer_req->epoch = connection->current_epoch;
38491- atomic_inc(&peer_req->epoch->epoch_size);
38492+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38493 atomic_inc(&peer_req->epoch->active);
38494 spin_unlock(&connection->epoch_lock);
38495
38496@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38497
38498 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38499 (int)part_stat_read(&disk->part0, sectors[1]) -
38500- atomic_read(&device->rs_sect_ev);
38501+ atomic_read_unchecked(&device->rs_sect_ev);
38502
38503 if (atomic_read(&device->ap_actlog_cnt)
38504 || curr_events - device->rs_last_events > 64) {
38505@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38506 device->use_csums = true;
38507 } else if (pi->cmd == P_OV_REPLY) {
38508 /* track progress, we may need to throttle */
38509- atomic_add(size >> 9, &device->rs_sect_in);
38510+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38511 peer_req->w.cb = w_e_end_ov_reply;
38512 dec_rs_pending(device);
38513 /* drbd_rs_begin_io done when we sent this request,
38514@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38515 goto out_free_e;
38516
38517 submit_for_resync:
38518- atomic_add(size >> 9, &device->rs_sect_ev);
38519+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38520
38521 submit:
38522 update_receiver_timing_details(connection, drbd_submit_peer_request);
38523@@ -4564,7 +4564,7 @@ struct data_cmd {
38524 int expect_payload;
38525 size_t pkt_size;
38526 int (*fn)(struct drbd_connection *, struct packet_info *);
38527-};
38528+} __do_const;
38529
38530 static struct data_cmd drbd_cmd_handler[] = {
38531 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38532@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38533 if (!list_empty(&connection->current_epoch->list))
38534 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38535 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38536- atomic_set(&connection->current_epoch->epoch_size, 0);
38537+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38538 connection->send.seen_any_write_yet = false;
38539
38540 drbd_info(connection, "Connection closed\n");
38541@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38542 put_ldev(device);
38543 }
38544 dec_rs_pending(device);
38545- atomic_add(blksize >> 9, &device->rs_sect_in);
38546+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38547
38548 return 0;
38549 }
38550@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38551 struct asender_cmd {
38552 size_t pkt_size;
38553 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38554-};
38555+} __do_const;
38556
38557 static struct asender_cmd asender_tbl[] = {
38558 [P_PING] = { 0, got_Ping },
38559diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38560index d0fae55..4469096 100644
38561--- a/drivers/block/drbd/drbd_worker.c
38562+++ b/drivers/block/drbd/drbd_worker.c
38563@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38564 list_add_tail(&peer_req->w.list, &device->read_ee);
38565 spin_unlock_irq(&device->resource->req_lock);
38566
38567- atomic_add(size >> 9, &device->rs_sect_ev);
38568+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38569 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38570 return 0;
38571
38572@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38573 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38574 int number, mxb;
38575
38576- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38577+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38578 device->rs_in_flight -= sect_in;
38579
38580 rcu_read_lock();
38581@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38582 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38583 struct fifo_buffer *plan;
38584
38585- atomic_set(&device->rs_sect_in, 0);
38586- atomic_set(&device->rs_sect_ev, 0);
38587+ atomic_set_unchecked(&device->rs_sect_in, 0);
38588+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38589 device->rs_in_flight = 0;
38590 device->rs_last_events =
38591 (int)part_stat_read(&disk->part0, sectors[0]) +
38592diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38593index 773e964..e85af00 100644
38594--- a/drivers/block/loop.c
38595+++ b/drivers/block/loop.c
38596@@ -234,7 +234,7 @@ static int __do_lo_send_write(struct file *file,
38597
38598 file_start_write(file);
38599 set_fs(get_ds());
38600- bw = file->f_op->write(file, buf, len, &pos);
38601+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38602 set_fs(old_fs);
38603 file_end_write(file);
38604 if (likely(bw == len))
38605diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38606index 09e628da..7607aaa 100644
38607--- a/drivers/block/pktcdvd.c
38608+++ b/drivers/block/pktcdvd.c
38609@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38610
38611 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38612 {
38613- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38614+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38615 }
38616
38617 /*
38618@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38619 return -EROFS;
38620 }
38621 pd->settings.fp = ti.fp;
38622- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38623+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38624
38625 if (ti.nwa_v) {
38626 pd->nwa = be32_to_cpu(ti.next_writable);
38627diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38628index b67066d..515b7f4 100644
38629--- a/drivers/block/rbd.c
38630+++ b/drivers/block/rbd.c
38631@@ -64,7 +64,7 @@
38632 * If the counter is already at its maximum value returns
38633 * -EINVAL without updating it.
38634 */
38635-static int atomic_inc_return_safe(atomic_t *v)
38636+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38637 {
38638 unsigned int counter;
38639
38640diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38641index e5565fb..71be10b4 100644
38642--- a/drivers/block/smart1,2.h
38643+++ b/drivers/block/smart1,2.h
38644@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38645 }
38646
38647 static struct access_method smart4_access = {
38648- smart4_submit_command,
38649- smart4_intr_mask,
38650- smart4_fifo_full,
38651- smart4_intr_pending,
38652- smart4_completed,
38653+ .submit_command = smart4_submit_command,
38654+ .set_intr_mask = smart4_intr_mask,
38655+ .fifo_full = smart4_fifo_full,
38656+ .intr_pending = smart4_intr_pending,
38657+ .command_completed = smart4_completed,
38658 };
38659
38660 /*
38661@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38662 }
38663
38664 static struct access_method smart2_access = {
38665- smart2_submit_command,
38666- smart2_intr_mask,
38667- smart2_fifo_full,
38668- smart2_intr_pending,
38669- smart2_completed,
38670+ .submit_command = smart2_submit_command,
38671+ .set_intr_mask = smart2_intr_mask,
38672+ .fifo_full = smart2_fifo_full,
38673+ .intr_pending = smart2_intr_pending,
38674+ .command_completed = smart2_completed,
38675 };
38676
38677 /*
38678@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38679 }
38680
38681 static struct access_method smart2e_access = {
38682- smart2e_submit_command,
38683- smart2e_intr_mask,
38684- smart2e_fifo_full,
38685- smart2e_intr_pending,
38686- smart2e_completed,
38687+ .submit_command = smart2e_submit_command,
38688+ .set_intr_mask = smart2e_intr_mask,
38689+ .fifo_full = smart2e_fifo_full,
38690+ .intr_pending = smart2e_intr_pending,
38691+ .command_completed = smart2e_completed,
38692 };
38693
38694 /*
38695@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38696 }
38697
38698 static struct access_method smart1_access = {
38699- smart1_submit_command,
38700- smart1_intr_mask,
38701- smart1_fifo_full,
38702- smart1_intr_pending,
38703- smart1_completed,
38704+ .submit_command = smart1_submit_command,
38705+ .set_intr_mask = smart1_intr_mask,
38706+ .fifo_full = smart1_fifo_full,
38707+ .intr_pending = smart1_intr_pending,
38708+ .command_completed = smart1_completed,
38709 };
38710diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38711index 55c135b..9f8d60c 100644
38712--- a/drivers/bluetooth/btwilink.c
38713+++ b/drivers/bluetooth/btwilink.c
38714@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38715
38716 static int bt_ti_probe(struct platform_device *pdev)
38717 {
38718- static struct ti_st *hst;
38719+ struct ti_st *hst;
38720 struct hci_dev *hdev;
38721 int err;
38722
38723diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38724index 5d28a45..a538f90 100644
38725--- a/drivers/cdrom/cdrom.c
38726+++ b/drivers/cdrom/cdrom.c
38727@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38728 ENSURE(reset, CDC_RESET);
38729 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38730 cdi->mc_flags = 0;
38731- cdo->n_minors = 0;
38732 cdi->options = CDO_USE_FFLAGS;
38733
38734 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38735@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38736 else
38737 cdi->cdda_method = CDDA_OLD;
38738
38739- if (!cdo->generic_packet)
38740- cdo->generic_packet = cdrom_dummy_generic_packet;
38741+ if (!cdo->generic_packet) {
38742+ pax_open_kernel();
38743+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38744+ pax_close_kernel();
38745+ }
38746
38747 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38748 mutex_lock(&cdrom_mutex);
38749@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38750 if (cdi->exit)
38751 cdi->exit(cdi);
38752
38753- cdi->ops->n_minors--;
38754 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38755 }
38756
38757@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38758 */
38759 nr = nframes;
38760 do {
38761- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38762+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38763 if (cgc.buffer)
38764 break;
38765
38766@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38767 struct cdrom_device_info *cdi;
38768 int ret;
38769
38770- ret = scnprintf(info + *pos, max_size - *pos, header);
38771+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38772 if (!ret)
38773 return 1;
38774
38775diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38776index 584bc31..e64a12c 100644
38777--- a/drivers/cdrom/gdrom.c
38778+++ b/drivers/cdrom/gdrom.c
38779@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38780 .audio_ioctl = gdrom_audio_ioctl,
38781 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38782 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38783- .n_minors = 1,
38784 };
38785
38786 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38787diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38788index a4af822..ed58cd1 100644
38789--- a/drivers/char/Kconfig
38790+++ b/drivers/char/Kconfig
38791@@ -17,7 +17,8 @@ config DEVMEM
38792
38793 config DEVKMEM
38794 bool "/dev/kmem virtual device support"
38795- default y
38796+ default n
38797+ depends on !GRKERNSEC_KMEM
38798 help
38799 Say Y here if you want to support the /dev/kmem device. The
38800 /dev/kmem device is rarely used, but can be used for certain
38801@@ -586,6 +587,7 @@ config DEVPORT
38802 bool
38803 depends on !M68K
38804 depends on ISA || PCI
38805+ depends on !GRKERNSEC_KMEM
38806 default y
38807
38808 source "drivers/s390/char/Kconfig"
38809diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38810index a48e05b..6bac831 100644
38811--- a/drivers/char/agp/compat_ioctl.c
38812+++ b/drivers/char/agp/compat_ioctl.c
38813@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38814 return -ENOMEM;
38815 }
38816
38817- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38818+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38819 sizeof(*usegment) * ureserve.seg_count)) {
38820 kfree(usegment);
38821 kfree(ksegment);
38822diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38823index 09f17eb..8531d2f 100644
38824--- a/drivers/char/agp/frontend.c
38825+++ b/drivers/char/agp/frontend.c
38826@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38827 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38828 return -EFAULT;
38829
38830- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38831+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38832 return -EFAULT;
38833
38834 client = agp_find_client_by_pid(reserve.pid);
38835@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38836 if (segment == NULL)
38837 return -ENOMEM;
38838
38839- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38840+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38841 sizeof(struct agp_segment) * reserve.seg_count)) {
38842 kfree(segment);
38843 return -EFAULT;
38844diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38845index 4f94375..413694e 100644
38846--- a/drivers/char/genrtc.c
38847+++ b/drivers/char/genrtc.c
38848@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38849 switch (cmd) {
38850
38851 case RTC_PLL_GET:
38852+ memset(&pll, 0, sizeof(pll));
38853 if (get_rtc_pll(&pll))
38854 return -EINVAL;
38855 else
38856diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38857index 5c0baa9..44011b1 100644
38858--- a/drivers/char/hpet.c
38859+++ b/drivers/char/hpet.c
38860@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38861 }
38862
38863 static int
38864-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38865+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38866 struct hpet_info *info)
38867 {
38868 struct hpet_timer __iomem *timer;
38869diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
38870index 24cc4ed..f9807cf 100644
38871--- a/drivers/char/i8k.c
38872+++ b/drivers/char/i8k.c
38873@@ -788,7 +788,7 @@ static const struct i8k_config_data i8k_config_data[] = {
38874 },
38875 };
38876
38877-static struct dmi_system_id i8k_dmi_table[] __initdata = {
38878+static const struct dmi_system_id i8k_dmi_table[] __initconst = {
38879 {
38880 .ident = "Dell Inspiron",
38881 .matches = {
38882diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38883index 9bb5928..57a7801 100644
38884--- a/drivers/char/ipmi/ipmi_msghandler.c
38885+++ b/drivers/char/ipmi/ipmi_msghandler.c
38886@@ -436,7 +436,7 @@ struct ipmi_smi {
38887 struct proc_dir_entry *proc_dir;
38888 char proc_dir_name[10];
38889
38890- atomic_t stats[IPMI_NUM_STATS];
38891+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38892
38893 /*
38894 * run_to_completion duplicate of smb_info, smi_info
38895@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38896 static DEFINE_MUTEX(smi_watchers_mutex);
38897
38898 #define ipmi_inc_stat(intf, stat) \
38899- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38900+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38901 #define ipmi_get_stat(intf, stat) \
38902- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38903+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38904
38905 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38906 "ACPI", "SMBIOS", "PCI",
38907@@ -2828,7 +2828,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38908 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38909 init_waitqueue_head(&intf->waitq);
38910 for (i = 0; i < IPMI_NUM_STATS; i++)
38911- atomic_set(&intf->stats[i], 0);
38912+ atomic_set_unchecked(&intf->stats[i], 0);
38913
38914 intf->proc_dir = NULL;
38915
38916diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38917index 518585c..6c985cef 100644
38918--- a/drivers/char/ipmi/ipmi_si_intf.c
38919+++ b/drivers/char/ipmi/ipmi_si_intf.c
38920@@ -289,7 +289,7 @@ struct smi_info {
38921 unsigned char slave_addr;
38922
38923 /* Counters and things for the proc filesystem. */
38924- atomic_t stats[SI_NUM_STATS];
38925+ atomic_unchecked_t stats[SI_NUM_STATS];
38926
38927 struct task_struct *thread;
38928
38929@@ -298,9 +298,9 @@ struct smi_info {
38930 };
38931
38932 #define smi_inc_stat(smi, stat) \
38933- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38934+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38935 #define smi_get_stat(smi, stat) \
38936- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38937+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38938
38939 #define SI_MAX_PARMS 4
38940
38941@@ -3498,7 +3498,7 @@ static int try_smi_init(struct smi_info *new_smi)
38942 atomic_set(&new_smi->req_events, 0);
38943 new_smi->run_to_completion = false;
38944 for (i = 0; i < SI_NUM_STATS; i++)
38945- atomic_set(&new_smi->stats[i], 0);
38946+ atomic_set_unchecked(&new_smi->stats[i], 0);
38947
38948 new_smi->interrupt_disabled = true;
38949 atomic_set(&new_smi->need_watch, 0);
38950diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38951index 297110c..3f69b43 100644
38952--- a/drivers/char/mem.c
38953+++ b/drivers/char/mem.c
38954@@ -18,6 +18,7 @@
38955 #include <linux/raw.h>
38956 #include <linux/tty.h>
38957 #include <linux/capability.h>
38958+#include <linux/security.h>
38959 #include <linux/ptrace.h>
38960 #include <linux/device.h>
38961 #include <linux/highmem.h>
38962@@ -36,6 +37,10 @@
38963
38964 #define DEVPORT_MINOR 4
38965
38966+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38967+extern const struct file_operations grsec_fops;
38968+#endif
38969+
38970 static inline unsigned long size_inside_page(unsigned long start,
38971 unsigned long size)
38972 {
38973@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38974
38975 while (cursor < to) {
38976 if (!devmem_is_allowed(pfn)) {
38977+#ifdef CONFIG_GRKERNSEC_KMEM
38978+ gr_handle_mem_readwrite(from, to);
38979+#else
38980 printk(KERN_INFO
38981 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38982 current->comm, from, to);
38983+#endif
38984 return 0;
38985 }
38986 cursor += PAGE_SIZE;
38987@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38988 }
38989 return 1;
38990 }
38991+#elif defined(CONFIG_GRKERNSEC_KMEM)
38992+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38993+{
38994+ return 0;
38995+}
38996 #else
38997 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38998 {
38999@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39000 #endif
39001
39002 while (count > 0) {
39003- unsigned long remaining;
39004+ unsigned long remaining = 0;
39005+ char *temp;
39006
39007 sz = size_inside_page(p, count);
39008
39009@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39010 if (!ptr)
39011 return -EFAULT;
39012
39013- remaining = copy_to_user(buf, ptr, sz);
39014+#ifdef CONFIG_PAX_USERCOPY
39015+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39016+ if (!temp) {
39017+ unxlate_dev_mem_ptr(p, ptr);
39018+ return -ENOMEM;
39019+ }
39020+ remaining = probe_kernel_read(temp, ptr, sz);
39021+#else
39022+ temp = ptr;
39023+#endif
39024+
39025+ if (!remaining)
39026+ remaining = copy_to_user(buf, temp, sz);
39027+
39028+#ifdef CONFIG_PAX_USERCOPY
39029+ kfree(temp);
39030+#endif
39031+
39032 unxlate_dev_mem_ptr(p, ptr);
39033 if (remaining)
39034 return -EFAULT;
39035@@ -380,9 +412,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39036 size_t count, loff_t *ppos)
39037 {
39038 unsigned long p = *ppos;
39039- ssize_t low_count, read, sz;
39040+ ssize_t low_count, read, sz, err = 0;
39041 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
39042- int err = 0;
39043
39044 read = 0;
39045 if (p < (unsigned long) high_memory) {
39046@@ -404,6 +435,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39047 }
39048 #endif
39049 while (low_count > 0) {
39050+ char *temp;
39051+
39052 sz = size_inside_page(p, low_count);
39053
39054 /*
39055@@ -413,7 +446,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39056 */
39057 kbuf = xlate_dev_kmem_ptr((void *)p);
39058
39059- if (copy_to_user(buf, kbuf, sz))
39060+#ifdef CONFIG_PAX_USERCOPY
39061+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39062+ if (!temp)
39063+ return -ENOMEM;
39064+ err = probe_kernel_read(temp, kbuf, sz);
39065+#else
39066+ temp = kbuf;
39067+#endif
39068+
39069+ if (!err)
39070+ err = copy_to_user(buf, temp, sz);
39071+
39072+#ifdef CONFIG_PAX_USERCOPY
39073+ kfree(temp);
39074+#endif
39075+
39076+ if (err)
39077 return -EFAULT;
39078 buf += sz;
39079 p += sz;
39080@@ -804,6 +853,9 @@ static const struct memdev {
39081 #ifdef CONFIG_PRINTK
39082 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
39083 #endif
39084+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39085+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 },
39086+#endif
39087 };
39088
39089 static int memory_open(struct inode *inode, struct file *filp)
39090@@ -865,7 +917,7 @@ static int __init chr_dev_init(void)
39091 continue;
39092
39093 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
39094- NULL, devlist[minor].name);
39095+ NULL, "%s", devlist[minor].name);
39096 }
39097
39098 return tty_init();
39099diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39100index 9df78e2..01ba9ae 100644
39101--- a/drivers/char/nvram.c
39102+++ b/drivers/char/nvram.c
39103@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39104
39105 spin_unlock_irq(&rtc_lock);
39106
39107- if (copy_to_user(buf, contents, tmp - contents))
39108+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39109 return -EFAULT;
39110
39111 *ppos = i;
39112diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39113index 0ea9986..e7b07e4 100644
39114--- a/drivers/char/pcmcia/synclink_cs.c
39115+++ b/drivers/char/pcmcia/synclink_cs.c
39116@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39117
39118 if (debug_level >= DEBUG_LEVEL_INFO)
39119 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39120- __FILE__, __LINE__, info->device_name, port->count);
39121+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39122
39123 if (tty_port_close_start(port, tty, filp) == 0)
39124 goto cleanup;
39125@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39126 cleanup:
39127 if (debug_level >= DEBUG_LEVEL_INFO)
39128 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39129- tty->driver->name, port->count);
39130+ tty->driver->name, atomic_read(&port->count));
39131 }
39132
39133 /* Wait until the transmitter is empty.
39134@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39135
39136 if (debug_level >= DEBUG_LEVEL_INFO)
39137 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39138- __FILE__, __LINE__, tty->driver->name, port->count);
39139+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39140
39141 /* If port is closing, signal caller to try again */
39142 if (port->flags & ASYNC_CLOSING){
39143@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39144 goto cleanup;
39145 }
39146 spin_lock(&port->lock);
39147- port->count++;
39148+ atomic_inc(&port->count);
39149 spin_unlock(&port->lock);
39150 spin_unlock_irqrestore(&info->netlock, flags);
39151
39152- if (port->count == 1) {
39153+ if (atomic_read(&port->count) == 1) {
39154 /* 1st open on this device, init hardware */
39155 retval = startup(info, tty);
39156 if (retval < 0)
39157@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39158 unsigned short new_crctype;
39159
39160 /* return error if TTY interface open */
39161- if (info->port.count)
39162+ if (atomic_read(&info->port.count))
39163 return -EBUSY;
39164
39165 switch (encoding)
39166@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39167
39168 /* arbitrate between network and tty opens */
39169 spin_lock_irqsave(&info->netlock, flags);
39170- if (info->port.count != 0 || info->netcount != 0) {
39171+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39172 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39173 spin_unlock_irqrestore(&info->netlock, flags);
39174 return -EBUSY;
39175@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39176 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39177
39178 /* return error if TTY interface open */
39179- if (info->port.count)
39180+ if (atomic_read(&info->port.count))
39181 return -EBUSY;
39182
39183 if (cmd != SIOCWANDEV)
39184diff --git a/drivers/char/random.c b/drivers/char/random.c
39185index 9cd6968..6416f00 100644
39186--- a/drivers/char/random.c
39187+++ b/drivers/char/random.c
39188@@ -289,9 +289,6 @@
39189 /*
39190 * To allow fractional bits to be tracked, the entropy_count field is
39191 * denominated in units of 1/8th bits.
39192- *
39193- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39194- * credit_entropy_bits() needs to be 64 bits wide.
39195 */
39196 #define ENTROPY_SHIFT 3
39197 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39198@@ -439,9 +436,9 @@ struct entropy_store {
39199 };
39200
39201 static void push_to_pool(struct work_struct *work);
39202-static __u32 input_pool_data[INPUT_POOL_WORDS];
39203-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39204-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39205+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39206+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39207+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39208
39209 static struct entropy_store input_pool = {
39210 .poolinfo = &poolinfo_table[0],
39211@@ -635,7 +632,7 @@ retry:
39212 /* The +2 corresponds to the /4 in the denominator */
39213
39214 do {
39215- unsigned int anfrac = min(pnfrac, pool_size/2);
39216+ u64 anfrac = min(pnfrac, pool_size/2);
39217 unsigned int add =
39218 ((pool_size - entropy_count)*anfrac*3) >> s;
39219
39220@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39221
39222 extract_buf(r, tmp);
39223 i = min_t(int, nbytes, EXTRACT_SIZE);
39224- if (copy_to_user(buf, tmp, i)) {
39225+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39226 ret = -EFAULT;
39227 break;
39228 }
39229@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39230 static int proc_do_uuid(struct ctl_table *table, int write,
39231 void __user *buffer, size_t *lenp, loff_t *ppos)
39232 {
39233- struct ctl_table fake_table;
39234+ ctl_table_no_const fake_table;
39235 unsigned char buf[64], tmp_uuid[16], *uuid;
39236
39237 uuid = table->data;
39238@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39239 static int proc_do_entropy(struct ctl_table *table, int write,
39240 void __user *buffer, size_t *lenp, loff_t *ppos)
39241 {
39242- struct ctl_table fake_table;
39243+ ctl_table_no_const fake_table;
39244 int entropy_count;
39245
39246 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
39247diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39248index e496dae..3db53b6 100644
39249--- a/drivers/char/sonypi.c
39250+++ b/drivers/char/sonypi.c
39251@@ -54,6 +54,7 @@
39252
39253 #include <asm/uaccess.h>
39254 #include <asm/io.h>
39255+#include <asm/local.h>
39256
39257 #include <linux/sonypi.h>
39258
39259@@ -490,7 +491,7 @@ static struct sonypi_device {
39260 spinlock_t fifo_lock;
39261 wait_queue_head_t fifo_proc_list;
39262 struct fasync_struct *fifo_async;
39263- int open_count;
39264+ local_t open_count;
39265 int model;
39266 struct input_dev *input_jog_dev;
39267 struct input_dev *input_key_dev;
39268@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39269 static int sonypi_misc_release(struct inode *inode, struct file *file)
39270 {
39271 mutex_lock(&sonypi_device.lock);
39272- sonypi_device.open_count--;
39273+ local_dec(&sonypi_device.open_count);
39274 mutex_unlock(&sonypi_device.lock);
39275 return 0;
39276 }
39277@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39278 {
39279 mutex_lock(&sonypi_device.lock);
39280 /* Flush input queue on first open */
39281- if (!sonypi_device.open_count)
39282+ if (!local_read(&sonypi_device.open_count))
39283 kfifo_reset(&sonypi_device.fifo);
39284- sonypi_device.open_count++;
39285+ local_inc(&sonypi_device.open_count);
39286 mutex_unlock(&sonypi_device.lock);
39287
39288 return 0;
39289@@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = {
39290
39291 static struct platform_device *sonypi_platform_device;
39292
39293-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
39294+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
39295 {
39296 .ident = "Sony Vaio",
39297 .matches = {
39298diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39299index 565a947..dcdc06e 100644
39300--- a/drivers/char/tpm/tpm_acpi.c
39301+++ b/drivers/char/tpm/tpm_acpi.c
39302@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39303 virt = acpi_os_map_iomem(start, len);
39304 if (!virt) {
39305 kfree(log->bios_event_log);
39306+ log->bios_event_log = NULL;
39307 printk("%s: ERROR - Unable to map memory\n", __func__);
39308 return -EIO;
39309 }
39310
39311- memcpy_fromio(log->bios_event_log, virt, len);
39312+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39313
39314 acpi_os_unmap_iomem(virt, len);
39315 return 0;
39316diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39317index 3a56a13..f8cbd25 100644
39318--- a/drivers/char/tpm/tpm_eventlog.c
39319+++ b/drivers/char/tpm/tpm_eventlog.c
39320@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39321 event = addr;
39322
39323 if ((event->event_type == 0 && event->event_size == 0) ||
39324- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39325+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39326 return NULL;
39327
39328 return addr;
39329@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39330 return NULL;
39331
39332 if ((event->event_type == 0 && event->event_size == 0) ||
39333- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39334+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39335 return NULL;
39336
39337 (*pos)++;
39338@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39339 int i;
39340
39341 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39342- seq_putc(m, data[i]);
39343+ if (!seq_putc(m, data[i]))
39344+ return -EFAULT;
39345
39346 return 0;
39347 }
39348diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39349index 72d7028..1586601 100644
39350--- a/drivers/char/virtio_console.c
39351+++ b/drivers/char/virtio_console.c
39352@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39353 if (to_user) {
39354 ssize_t ret;
39355
39356- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39357+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39358 if (ret)
39359 return -EFAULT;
39360 } else {
39361@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39362 if (!port_has_data(port) && !port->host_connected)
39363 return 0;
39364
39365- return fill_readbuf(port, ubuf, count, true);
39366+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39367 }
39368
39369 static int wait_port_writable(struct port *port, bool nonblock)
39370diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39371index 956b7e5..b655045 100644
39372--- a/drivers/clk/clk-composite.c
39373+++ b/drivers/clk/clk-composite.c
39374@@ -197,7 +197,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39375 struct clk *clk;
39376 struct clk_init_data init;
39377 struct clk_composite *composite;
39378- struct clk_ops *clk_composite_ops;
39379+ clk_ops_no_const *clk_composite_ops;
39380
39381 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39382 if (!composite) {
39383diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
39384index 2e4f6d4..b4cf487 100644
39385--- a/drivers/clk/hisilicon/clk-hi3620.c
39386+++ b/drivers/clk/hisilicon/clk-hi3620.c
39387@@ -38,44 +38,44 @@
39388 #include "clk.h"
39389
39390 /* clock parent list */
39391-static const char *timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
39392-static const char *timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
39393-static const char *timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
39394-static const char *timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
39395-static const char *timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
39396-static const char *timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
39397-static const char *timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
39398-static const char *timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
39399-static const char *timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
39400-static const char *timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
39401-static const char *uart0_mux_p[] __initconst = { "osc26m", "pclk", };
39402-static const char *uart1_mux_p[] __initconst = { "osc26m", "pclk", };
39403-static const char *uart2_mux_p[] __initconst = { "osc26m", "pclk", };
39404-static const char *uart3_mux_p[] __initconst = { "osc26m", "pclk", };
39405-static const char *uart4_mux_p[] __initconst = { "osc26m", "pclk", };
39406-static const char *spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39407-static const char *spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39408-static const char *spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39409+static const char * const timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
39410+static const char * const timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
39411+static const char * const timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
39412+static const char * const timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
39413+static const char * const timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
39414+static const char * const timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
39415+static const char * const timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
39416+static const char * const timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
39417+static const char * const timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
39418+static const char * const timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
39419+static const char * const uart0_mux_p[] __initconst = { "osc26m", "pclk", };
39420+static const char * const uart1_mux_p[] __initconst = { "osc26m", "pclk", };
39421+static const char * const uart2_mux_p[] __initconst = { "osc26m", "pclk", };
39422+static const char * const uart3_mux_p[] __initconst = { "osc26m", "pclk", };
39423+static const char * const uart4_mux_p[] __initconst = { "osc26m", "pclk", };
39424+static const char * const spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39425+static const char * const spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39426+static const char * const spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39427 /* share axi parent */
39428-static const char *saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
39429-static const char *pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
39430-static const char *pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
39431-static const char *sd_mux_p[] __initconst = { "armpll2", "armpll3", };
39432-static const char *mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
39433-static const char *mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
39434-static const char *g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
39435-static const char *venc_mux_p[] __initconst = { "armpll2", "armpll3", };
39436-static const char *vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
39437-static const char *vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
39438-static const char *edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
39439-static const char *ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
39440+static const char * const saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
39441+static const char * const pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
39442+static const char * const pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
39443+static const char * const sd_mux_p[] __initconst = { "armpll2", "armpll3", };
39444+static const char * const mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
39445+static const char * const mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
39446+static const char * const g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
39447+static const char * const venc_mux_p[] __initconst = { "armpll2", "armpll3", };
39448+static const char * const vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
39449+static const char * const vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
39450+static const char * const edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
39451+static const char * const ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
39452 "armpll3", "armpll5", };
39453-static const char *edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
39454-static const char *ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
39455+static const char * const edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
39456+static const char * const ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
39457 "armpll3", "armpll5", };
39458-static const char *rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
39459-static const char *mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
39460-static const char *mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
39461+static const char * const rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
39462+static const char * const mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
39463+static const char * const mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
39464
39465
39466 /* fixed rate clocks */
39467diff --git a/drivers/clk/hisilicon/clk-hix5hd2.c b/drivers/clk/hisilicon/clk-hix5hd2.c
39468index 3f369c6..05f9ffd 100644
39469--- a/drivers/clk/hisilicon/clk-hix5hd2.c
39470+++ b/drivers/clk/hisilicon/clk-hix5hd2.c
39471@@ -46,15 +46,15 @@ static struct hisi_fixed_rate_clock hix5hd2_fixed_rate_clks[] __initdata = {
39472 { HIX5HD2_FIXED_83M, "83m", NULL, CLK_IS_ROOT, 83333333, },
39473 };
39474
39475-static const char *sfc_mux_p[] __initconst = {
39476+static const char * const sfc_mux_p[] __initconst = {
39477 "24m", "150m", "200m", "100m", "75m", };
39478 static u32 sfc_mux_table[] = {0, 4, 5, 6, 7};
39479
39480-static const char *sdio_mux_p[] __initconst = {
39481+static const char * const sdio_mux_p[] __initconst = {
39482 "75m", "100m", "50m", "15m", };
39483 static u32 sdio_mux_table[] = {0, 1, 2, 3};
39484
39485-static const char *fephy_mux_p[] __initconst = { "25m", "125m"};
39486+static const char * const fephy_mux_p[] __initconst = { "25m", "125m"};
39487 static u32 fephy_mux_table[] = {0, 1};
39488
39489
39490diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
39491index 7eb684c..147c6fc 100644
39492--- a/drivers/clk/rockchip/clk-rk3188.c
39493+++ b/drivers/clk/rockchip/clk-rk3188.c
39494@@ -704,7 +704,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
39495 GATE(ACLK_GPS, "aclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(8), 13, GFLAGS),
39496 };
39497
39498-static const char *rk3188_critical_clocks[] __initconst = {
39499+static const char * const rk3188_critical_clocks[] __initconst = {
39500 "aclk_cpu",
39501 "aclk_peri",
39502 "hclk_peri",
39503diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
39504index 05d7a0b..4fc131c 100644
39505--- a/drivers/clk/rockchip/clk-rk3288.c
39506+++ b/drivers/clk/rockchip/clk-rk3288.c
39507@@ -771,7 +771,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
39508 GATE(0, "pclk_isp_in", "ext_isp", 0, RK3288_CLKGATE_CON(16), 3, GFLAGS),
39509 };
39510
39511-static const char *rk3288_critical_clocks[] __initconst = {
39512+static const char * const rk3288_critical_clocks[] __initconst = {
39513 "aclk_cpu",
39514 "aclk_peri",
39515 "hclk_peri",
39516diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
39517index 58d2e3b..0c21b0d 100644
39518--- a/drivers/clk/rockchip/clk.h
39519+++ b/drivers/clk/rockchip/clk.h
39520@@ -182,7 +182,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
39521 const char **parent_names, u8 num_parents,
39522 void __iomem *reg, int shift);
39523
39524-#define PNAME(x) static const char *x[] __initconst
39525+#define PNAME(x) static const char * const x[] __initconst
39526
39527 enum rockchip_clk_branch_type {
39528 branch_composite,
39529diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
39530index e4c7538..99c50cd 100644
39531--- a/drivers/clk/samsung/clk.h
39532+++ b/drivers/clk/samsung/clk.h
39533@@ -260,7 +260,7 @@ struct samsung_gate_clock {
39534 #define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a) \
39535 __GATE(_id, dname, cname, pname, o, b, f, gf, a)
39536
39537-#define PNAME(x) static const char *x[] __initdata
39538+#define PNAME(x) static const char * const x[] __initconst
39539
39540 /**
39541 * struct samsung_clk_reg_dump: register dump of clock controller registers.
39542diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39543index dd3a78c..386d49c 100644
39544--- a/drivers/clk/socfpga/clk-gate.c
39545+++ b/drivers/clk/socfpga/clk-gate.c
39546@@ -22,6 +22,7 @@
39547 #include <linux/mfd/syscon.h>
39548 #include <linux/of.h>
39549 #include <linux/regmap.h>
39550+#include <asm/pgtable.h>
39551
39552 #include "clk.h"
39553
39554@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39555 return 0;
39556 }
39557
39558-static struct clk_ops gateclk_ops = {
39559+static clk_ops_no_const gateclk_ops __read_only = {
39560 .prepare = socfpga_clk_prepare,
39561 .recalc_rate = socfpga_clk_recalc_rate,
39562 .get_parent = socfpga_clk_get_parent,
39563@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39564 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39565 socfpga_clk->hw.bit_idx = clk_gate[1];
39566
39567- gateclk_ops.enable = clk_gate_ops.enable;
39568- gateclk_ops.disable = clk_gate_ops.disable;
39569+ pax_open_kernel();
39570+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39571+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39572+ pax_close_kernel();
39573 }
39574
39575 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39576diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39577index de6da95..c98278b 100644
39578--- a/drivers/clk/socfpga/clk-pll.c
39579+++ b/drivers/clk/socfpga/clk-pll.c
39580@@ -21,6 +21,7 @@
39581 #include <linux/io.h>
39582 #include <linux/of.h>
39583 #include <linux/of_address.h>
39584+#include <asm/pgtable.h>
39585
39586 #include "clk.h"
39587
39588@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39589 CLK_MGR_PLL_CLK_SRC_MASK;
39590 }
39591
39592-static struct clk_ops clk_pll_ops = {
39593+static clk_ops_no_const clk_pll_ops __read_only = {
39594 .recalc_rate = clk_pll_recalc_rate,
39595 .get_parent = clk_pll_get_parent,
39596 };
39597@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39598 pll_clk->hw.hw.init = &init;
39599
39600 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39601- clk_pll_ops.enable = clk_gate_ops.enable;
39602- clk_pll_ops.disable = clk_gate_ops.disable;
39603+ pax_open_kernel();
39604+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39605+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39606+ pax_close_kernel();
39607
39608 clk = clk_register(NULL, &pll_clk->hw.hw);
39609 if (WARN_ON(IS_ERR(clk))) {
39610diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
39611index 3654f61..81abe81 100644
39612--- a/drivers/clk/ti/composite.c
39613+++ b/drivers/clk/ti/composite.c
39614@@ -69,7 +69,7 @@ struct component_clk {
39615 struct list_head link;
39616 };
39617
39618-static const char * __initconst component_clk_types[] = {
39619+static const char * const __initconst component_clk_types[] = {
39620 "gate", "divider", "mux"
39621 };
39622
39623diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
39624index f870aad..04ba1e4 100644
39625--- a/drivers/clk/zynq/clkc.c
39626+++ b/drivers/clk/zynq/clkc.c
39627@@ -85,22 +85,22 @@ static DEFINE_SPINLOCK(canmioclk_lock);
39628 static DEFINE_SPINLOCK(dbgclk_lock);
39629 static DEFINE_SPINLOCK(aperclk_lock);
39630
39631-static const char *armpll_parents[] __initconst = {"armpll_int", "ps_clk"};
39632-static const char *ddrpll_parents[] __initconst = {"ddrpll_int", "ps_clk"};
39633-static const char *iopll_parents[] __initconst = {"iopll_int", "ps_clk"};
39634-static const char *gem0_mux_parents[] __initconst = {"gem0_div1", "dummy_name"};
39635-static const char *gem1_mux_parents[] __initconst = {"gem1_div1", "dummy_name"};
39636-static const char *can0_mio_mux2_parents[] __initconst = {"can0_gate",
39637+static const char * const armpll_parents[] __initconst = {"armpll_int", "ps_clk"};
39638+static const char * const ddrpll_parents[] __initconst = {"ddrpll_int", "ps_clk"};
39639+static const char * const iopll_parents[] __initconst = {"iopll_int", "ps_clk"};
39640+static const char * gem0_mux_parents[] __initdata = {"gem0_div1", "dummy_name"};
39641+static const char * gem1_mux_parents[] __initdata = {"gem1_div1", "dummy_name"};
39642+static const char * const can0_mio_mux2_parents[] __initconst = {"can0_gate",
39643 "can0_mio_mux"};
39644-static const char *can1_mio_mux2_parents[] __initconst = {"can1_gate",
39645+static const char * const can1_mio_mux2_parents[] __initconst = {"can1_gate",
39646 "can1_mio_mux"};
39647-static const char *dbg_emio_mux_parents[] __initconst = {"dbg_div",
39648+static const char * dbg_emio_mux_parents[] __initdata = {"dbg_div",
39649 "dummy_name"};
39650
39651-static const char *dbgtrc_emio_input_names[] __initconst = {"trace_emio_clk"};
39652-static const char *gem0_emio_input_names[] __initconst = {"gem0_emio_clk"};
39653-static const char *gem1_emio_input_names[] __initconst = {"gem1_emio_clk"};
39654-static const char *swdt_ext_clk_input_names[] __initconst = {"swdt_ext_clk"};
39655+static const char * const dbgtrc_emio_input_names[] __initconst = {"trace_emio_clk"};
39656+static const char * const gem0_emio_input_names[] __initconst = {"gem0_emio_clk"};
39657+static const char * const gem1_emio_input_names[] __initconst = {"gem1_emio_clk"};
39658+static const char * const swdt_ext_clk_input_names[] __initconst = {"swdt_ext_clk"};
39659
39660 static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
39661 const char *clk_name, void __iomem *fclk_ctrl_reg,
39662diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39663index b0c18ed..1713a80 100644
39664--- a/drivers/cpufreq/acpi-cpufreq.c
39665+++ b/drivers/cpufreq/acpi-cpufreq.c
39666@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39667 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39668 per_cpu(acfreq_data, cpu) = data;
39669
39670- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39671- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39672+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39673+ pax_open_kernel();
39674+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39675+ pax_close_kernel();
39676+ }
39677
39678 result = acpi_processor_register_performance(data->acpi_data, cpu);
39679 if (result)
39680@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39681 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39682 break;
39683 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39684- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39685+ pax_open_kernel();
39686+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39687+ pax_close_kernel();
39688 break;
39689 default:
39690 break;
39691@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39692 if (!msrs)
39693 return;
39694
39695- acpi_cpufreq_driver.boost_supported = true;
39696- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39697+ pax_open_kernel();
39698+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39699+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39700+ pax_close_kernel();
39701
39702 cpu_notifier_register_begin();
39703
39704diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39705index bab67db..91af7e3 100644
39706--- a/drivers/cpufreq/cpufreq-dt.c
39707+++ b/drivers/cpufreq/cpufreq-dt.c
39708@@ -392,7 +392,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39709 if (!IS_ERR(cpu_reg))
39710 regulator_put(cpu_reg);
39711
39712- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39713+ pax_open_kernel();
39714+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39715+ pax_close_kernel();
39716
39717 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39718 if (ret)
39719diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39720index 8ae655c..3141442 100644
39721--- a/drivers/cpufreq/cpufreq.c
39722+++ b/drivers/cpufreq/cpufreq.c
39723@@ -2108,7 +2108,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39724 }
39725
39726 mutex_lock(&cpufreq_governor_mutex);
39727- list_del(&governor->governor_list);
39728+ pax_list_del(&governor->governor_list);
39729 mutex_unlock(&cpufreq_governor_mutex);
39730 return;
39731 }
39732@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39733 return NOTIFY_OK;
39734 }
39735
39736-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39737+static struct notifier_block cpufreq_cpu_notifier = {
39738 .notifier_call = cpufreq_cpu_callback,
39739 };
39740
39741@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
39742 return 0;
39743
39744 write_lock_irqsave(&cpufreq_driver_lock, flags);
39745- cpufreq_driver->boost_enabled = state;
39746+ pax_open_kernel();
39747+ *(bool *)&cpufreq_driver->boost_enabled = state;
39748+ pax_close_kernel();
39749 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39750
39751 ret = cpufreq_driver->set_boost(state);
39752 if (ret) {
39753 write_lock_irqsave(&cpufreq_driver_lock, flags);
39754- cpufreq_driver->boost_enabled = !state;
39755+ pax_open_kernel();
39756+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39757+ pax_close_kernel();
39758 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39759
39760 pr_err("%s: Cannot %s BOOST\n",
39761@@ -2434,16 +2438,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39762 cpufreq_driver = driver_data;
39763 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39764
39765- if (driver_data->setpolicy)
39766- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39767+ if (driver_data->setpolicy) {
39768+ pax_open_kernel();
39769+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39770+ pax_close_kernel();
39771+ }
39772
39773 if (cpufreq_boost_supported()) {
39774 /*
39775 * Check if driver provides function to enable boost -
39776 * if not, use cpufreq_boost_set_sw as default
39777 */
39778- if (!cpufreq_driver->set_boost)
39779- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39780+ if (!cpufreq_driver->set_boost) {
39781+ pax_open_kernel();
39782+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39783+ pax_close_kernel();
39784+ }
39785
39786 ret = cpufreq_sysfs_create_file(&boost.attr);
39787 if (ret) {
39788diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39789index 1b44496..b80ff5e 100644
39790--- a/drivers/cpufreq/cpufreq_governor.c
39791+++ b/drivers/cpufreq/cpufreq_governor.c
39792@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39793 struct dbs_data *dbs_data;
39794 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39795 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39796- struct od_ops *od_ops = NULL;
39797+ const struct od_ops *od_ops = NULL;
39798 struct od_dbs_tuners *od_tuners = NULL;
39799 struct cs_dbs_tuners *cs_tuners = NULL;
39800 struct cpu_dbs_common_info *cpu_cdbs;
39801@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39802
39803 if ((cdata->governor == GOV_CONSERVATIVE) &&
39804 (!policy->governor->initialized)) {
39805- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39806+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39807
39808 cpufreq_register_notifier(cs_ops->notifier_block,
39809 CPUFREQ_TRANSITION_NOTIFIER);
39810@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39811
39812 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39813 (policy->governor->initialized == 1)) {
39814- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39815+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39816
39817 cpufreq_unregister_notifier(cs_ops->notifier_block,
39818 CPUFREQ_TRANSITION_NOTIFIER);
39819diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39820index cc401d1..8197340 100644
39821--- a/drivers/cpufreq/cpufreq_governor.h
39822+++ b/drivers/cpufreq/cpufreq_governor.h
39823@@ -212,7 +212,7 @@ struct common_dbs_data {
39824 void (*exit)(struct dbs_data *dbs_data);
39825
39826 /* Governor specific ops, see below */
39827- void *gov_ops;
39828+ const void *gov_ops;
39829 };
39830
39831 /* Governor Per policy data */
39832@@ -232,7 +232,7 @@ struct od_ops {
39833 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39834 unsigned int freq_next, unsigned int relation);
39835 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39836-};
39837+} __no_const;
39838
39839 struct cs_ops {
39840 struct notifier_block *notifier_block;
39841diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39842index ad3f38f..8f086cd 100644
39843--- a/drivers/cpufreq/cpufreq_ondemand.c
39844+++ b/drivers/cpufreq/cpufreq_ondemand.c
39845@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39846
39847 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39848
39849-static struct od_ops od_ops = {
39850+static struct od_ops od_ops __read_only = {
39851 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39852 .powersave_bias_target = generic_powersave_bias_target,
39853 .freq_increase = dbs_freq_increase,
39854@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39855 (struct cpufreq_policy *, unsigned int, unsigned int),
39856 unsigned int powersave_bias)
39857 {
39858- od_ops.powersave_bias_target = f;
39859+ pax_open_kernel();
39860+ *(void **)&od_ops.powersave_bias_target = f;
39861+ pax_close_kernel();
39862 od_set_powersave_bias(powersave_bias);
39863 }
39864 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39865
39866 void od_unregister_powersave_bias_handler(void)
39867 {
39868- od_ops.powersave_bias_target = generic_powersave_bias_target;
39869+ pax_open_kernel();
39870+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39871+ pax_close_kernel();
39872 od_set_powersave_bias(0);
39873 }
39874 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39875diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39876index 872c577..5fb3c20 100644
39877--- a/drivers/cpufreq/intel_pstate.c
39878+++ b/drivers/cpufreq/intel_pstate.c
39879@@ -133,10 +133,10 @@ struct pstate_funcs {
39880 struct cpu_defaults {
39881 struct pstate_adjust_policy pid_policy;
39882 struct pstate_funcs funcs;
39883-};
39884+} __do_const;
39885
39886 static struct pstate_adjust_policy pid_params;
39887-static struct pstate_funcs pstate_funcs;
39888+static struct pstate_funcs *pstate_funcs;
39889 static int hwp_active;
39890
39891 struct perf_limits {
39892@@ -690,18 +690,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39893
39894 cpu->pstate.current_pstate = pstate;
39895
39896- pstate_funcs.set(cpu, pstate);
39897+ pstate_funcs->set(cpu, pstate);
39898 }
39899
39900 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39901 {
39902- cpu->pstate.min_pstate = pstate_funcs.get_min();
39903- cpu->pstate.max_pstate = pstate_funcs.get_max();
39904- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39905- cpu->pstate.scaling = pstate_funcs.get_scaling();
39906+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39907+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39908+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39909+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39910
39911- if (pstate_funcs.get_vid)
39912- pstate_funcs.get_vid(cpu);
39913+ if (pstate_funcs->get_vid)
39914+ pstate_funcs->get_vid(cpu);
39915 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39916 }
39917
39918@@ -1030,9 +1030,9 @@ static int intel_pstate_msrs_not_valid(void)
39919 rdmsrl(MSR_IA32_APERF, aperf);
39920 rdmsrl(MSR_IA32_MPERF, mperf);
39921
39922- if (!pstate_funcs.get_max() ||
39923- !pstate_funcs.get_min() ||
39924- !pstate_funcs.get_turbo())
39925+ if (!pstate_funcs->get_max() ||
39926+ !pstate_funcs->get_min() ||
39927+ !pstate_funcs->get_turbo())
39928 return -ENODEV;
39929
39930 rdmsrl(MSR_IA32_APERF, tmp);
39931@@ -1046,7 +1046,7 @@ static int intel_pstate_msrs_not_valid(void)
39932 return 0;
39933 }
39934
39935-static void copy_pid_params(struct pstate_adjust_policy *policy)
39936+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39937 {
39938 pid_params.sample_rate_ms = policy->sample_rate_ms;
39939 pid_params.p_gain_pct = policy->p_gain_pct;
39940@@ -1058,12 +1058,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39941
39942 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39943 {
39944- pstate_funcs.get_max = funcs->get_max;
39945- pstate_funcs.get_min = funcs->get_min;
39946- pstate_funcs.get_turbo = funcs->get_turbo;
39947- pstate_funcs.get_scaling = funcs->get_scaling;
39948- pstate_funcs.set = funcs->set;
39949- pstate_funcs.get_vid = funcs->get_vid;
39950+ pstate_funcs = funcs;
39951 }
39952
39953 #if IS_ENABLED(CONFIG_ACPI)
39954diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39955index 529cfd9..0e28fff 100644
39956--- a/drivers/cpufreq/p4-clockmod.c
39957+++ b/drivers/cpufreq/p4-clockmod.c
39958@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39959 case 0x0F: /* Core Duo */
39960 case 0x16: /* Celeron Core */
39961 case 0x1C: /* Atom */
39962- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39963+ pax_open_kernel();
39964+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39965+ pax_close_kernel();
39966 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39967 case 0x0D: /* Pentium M (Dothan) */
39968- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39969+ pax_open_kernel();
39970+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39971+ pax_close_kernel();
39972 /* fall through */
39973 case 0x09: /* Pentium M (Banias) */
39974 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39975@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39976
39977 /* on P-4s, the TSC runs with constant frequency independent whether
39978 * throttling is active or not. */
39979- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39980+ pax_open_kernel();
39981+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39982+ pax_close_kernel();
39983
39984 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39985 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39986diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39987index 9bb42ba..b01b4a2 100644
39988--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39989+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39990@@ -18,14 +18,12 @@
39991 #include <asm/head.h>
39992 #include <asm/timer.h>
39993
39994-static struct cpufreq_driver *cpufreq_us3_driver;
39995-
39996 struct us3_freq_percpu_info {
39997 struct cpufreq_frequency_table table[4];
39998 };
39999
40000 /* Indexed by cpu number. */
40001-static struct us3_freq_percpu_info *us3_freq_table;
40002+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
40003
40004 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
40005 * in the Safari config register.
40006@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
40007
40008 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
40009 {
40010- if (cpufreq_us3_driver)
40011- us3_freq_target(policy, 0);
40012+ us3_freq_target(policy, 0);
40013
40014 return 0;
40015 }
40016
40017+static int __init us3_freq_init(void);
40018+static void __exit us3_freq_exit(void);
40019+
40020+static struct cpufreq_driver cpufreq_us3_driver = {
40021+ .init = us3_freq_cpu_init,
40022+ .verify = cpufreq_generic_frequency_table_verify,
40023+ .target_index = us3_freq_target,
40024+ .get = us3_freq_get,
40025+ .exit = us3_freq_cpu_exit,
40026+ .name = "UltraSPARC-III",
40027+
40028+};
40029+
40030 static int __init us3_freq_init(void)
40031 {
40032 unsigned long manuf, impl, ver;
40033- int ret;
40034
40035 if (tlb_type != cheetah && tlb_type != cheetah_plus)
40036 return -ENODEV;
40037@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
40038 (impl == CHEETAH_IMPL ||
40039 impl == CHEETAH_PLUS_IMPL ||
40040 impl == JAGUAR_IMPL ||
40041- impl == PANTHER_IMPL)) {
40042- struct cpufreq_driver *driver;
40043-
40044- ret = -ENOMEM;
40045- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
40046- if (!driver)
40047- goto err_out;
40048-
40049- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
40050- GFP_KERNEL);
40051- if (!us3_freq_table)
40052- goto err_out;
40053-
40054- driver->init = us3_freq_cpu_init;
40055- driver->verify = cpufreq_generic_frequency_table_verify;
40056- driver->target_index = us3_freq_target;
40057- driver->get = us3_freq_get;
40058- driver->exit = us3_freq_cpu_exit;
40059- strcpy(driver->name, "UltraSPARC-III");
40060-
40061- cpufreq_us3_driver = driver;
40062- ret = cpufreq_register_driver(driver);
40063- if (ret)
40064- goto err_out;
40065-
40066- return 0;
40067-
40068-err_out:
40069- if (driver) {
40070- kfree(driver);
40071- cpufreq_us3_driver = NULL;
40072- }
40073- kfree(us3_freq_table);
40074- us3_freq_table = NULL;
40075- return ret;
40076- }
40077+ impl == PANTHER_IMPL))
40078+ return cpufreq_register_driver(&cpufreq_us3_driver);
40079
40080 return -ENODEV;
40081 }
40082
40083 static void __exit us3_freq_exit(void)
40084 {
40085- if (cpufreq_us3_driver) {
40086- cpufreq_unregister_driver(cpufreq_us3_driver);
40087- kfree(cpufreq_us3_driver);
40088- cpufreq_us3_driver = NULL;
40089- kfree(us3_freq_table);
40090- us3_freq_table = NULL;
40091- }
40092+ cpufreq_unregister_driver(&cpufreq_us3_driver);
40093 }
40094
40095 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
40096diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
40097index 7d4a315..21bb886 100644
40098--- a/drivers/cpufreq/speedstep-centrino.c
40099+++ b/drivers/cpufreq/speedstep-centrino.c
40100@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
40101 !cpu_has(cpu, X86_FEATURE_EST))
40102 return -ENODEV;
40103
40104- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
40105- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40106+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
40107+ pax_open_kernel();
40108+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40109+ pax_close_kernel();
40110+ }
40111
40112 if (policy->cpu != 0)
40113 return -ENODEV;
40114diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
40115index 2697e87..c32476c 100644
40116--- a/drivers/cpuidle/driver.c
40117+++ b/drivers/cpuidle/driver.c
40118@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
40119
40120 static void poll_idle_init(struct cpuidle_driver *drv)
40121 {
40122- struct cpuidle_state *state = &drv->states[0];
40123+ cpuidle_state_no_const *state = &drv->states[0];
40124
40125 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
40126 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
40127diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
40128index fb9f511..213e6cc 100644
40129--- a/drivers/cpuidle/governor.c
40130+++ b/drivers/cpuidle/governor.c
40131@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
40132 mutex_lock(&cpuidle_lock);
40133 if (__cpuidle_find_governor(gov->name) == NULL) {
40134 ret = 0;
40135- list_add_tail(&gov->governor_list, &cpuidle_governors);
40136+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
40137 if (!cpuidle_curr_governor ||
40138 cpuidle_curr_governor->rating < gov->rating)
40139 cpuidle_switch_governor(gov);
40140diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
40141index 832a2c3..1794080 100644
40142--- a/drivers/cpuidle/sysfs.c
40143+++ b/drivers/cpuidle/sysfs.c
40144@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
40145 NULL
40146 };
40147
40148-static struct attribute_group cpuidle_attr_group = {
40149+static attribute_group_no_const cpuidle_attr_group = {
40150 .attrs = cpuidle_default_attrs,
40151 .name = "cpuidle",
40152 };
40153diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
40154index 8d2a772..33826c9 100644
40155--- a/drivers/crypto/hifn_795x.c
40156+++ b/drivers/crypto/hifn_795x.c
40157@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
40158 MODULE_PARM_DESC(hifn_pll_ref,
40159 "PLL reference clock (pci[freq] or ext[freq], default ext)");
40160
40161-static atomic_t hifn_dev_number;
40162+static atomic_unchecked_t hifn_dev_number;
40163
40164 #define ACRYPTO_OP_DECRYPT 0
40165 #define ACRYPTO_OP_ENCRYPT 1
40166@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
40167 goto err_out_disable_pci_device;
40168
40169 snprintf(name, sizeof(name), "hifn%d",
40170- atomic_inc_return(&hifn_dev_number)-1);
40171+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
40172
40173 err = pci_request_regions(pdev, name);
40174 if (err)
40175diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
40176index 30b538d8..1610d75 100644
40177--- a/drivers/devfreq/devfreq.c
40178+++ b/drivers/devfreq/devfreq.c
40179@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
40180 goto err_out;
40181 }
40182
40183- list_add(&governor->node, &devfreq_governor_list);
40184+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
40185
40186 list_for_each_entry(devfreq, &devfreq_list, node) {
40187 int ret = 0;
40188@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
40189 }
40190 }
40191
40192- list_del(&governor->node);
40193+ pax_list_del((struct list_head *)&governor->node);
40194 err_out:
40195 mutex_unlock(&devfreq_list_lock);
40196
40197diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
40198index 8ee383d..736b5de 100644
40199--- a/drivers/dma/sh/shdma-base.c
40200+++ b/drivers/dma/sh/shdma-base.c
40201@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
40202 schan->slave_id = -EINVAL;
40203 }
40204
40205- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
40206- sdev->desc_size, GFP_KERNEL);
40207+ schan->desc = kcalloc(sdev->desc_size,
40208+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
40209 if (!schan->desc) {
40210 ret = -ENOMEM;
40211 goto edescalloc;
40212diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
40213index 9f1d4c7..fceff78 100644
40214--- a/drivers/dma/sh/shdmac.c
40215+++ b/drivers/dma/sh/shdmac.c
40216@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
40217 return ret;
40218 }
40219
40220-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
40221+static struct notifier_block sh_dmae_nmi_notifier = {
40222 .notifier_call = sh_dmae_nmi_handler,
40223
40224 /* Run before NMI debug handler and KGDB */
40225diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
40226index 592af5f..bb1d583 100644
40227--- a/drivers/edac/edac_device.c
40228+++ b/drivers/edac/edac_device.c
40229@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
40230 */
40231 int edac_device_alloc_index(void)
40232 {
40233- static atomic_t device_indexes = ATOMIC_INIT(0);
40234+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
40235
40236- return atomic_inc_return(&device_indexes) - 1;
40237+ return atomic_inc_return_unchecked(&device_indexes) - 1;
40238 }
40239 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
40240
40241diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
40242index c84eecb..4d7381d 100644
40243--- a/drivers/edac/edac_mc_sysfs.c
40244+++ b/drivers/edac/edac_mc_sysfs.c
40245@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
40246 struct dev_ch_attribute {
40247 struct device_attribute attr;
40248 int channel;
40249-};
40250+} __do_const;
40251
40252 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
40253 static struct dev_ch_attribute dev_attr_legacy_##_name = \
40254@@ -1009,15 +1009,17 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
40255 }
40256
40257 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
40258+ pax_open_kernel();
40259 if (mci->get_sdram_scrub_rate) {
40260- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40261- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40262+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40263+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40264 }
40265
40266 if (mci->set_sdram_scrub_rate) {
40267- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40268- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40269+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40270+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40271 }
40272+ pax_close_kernel();
40273
40274 err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate);
40275 if (err) {
40276diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
40277index 2cf44b4d..6dd2dc7 100644
40278--- a/drivers/edac/edac_pci.c
40279+++ b/drivers/edac/edac_pci.c
40280@@ -29,7 +29,7 @@
40281
40282 static DEFINE_MUTEX(edac_pci_ctls_mutex);
40283 static LIST_HEAD(edac_pci_list);
40284-static atomic_t pci_indexes = ATOMIC_INIT(0);
40285+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
40286
40287 /*
40288 * edac_pci_alloc_ctl_info
40289@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
40290 */
40291 int edac_pci_alloc_index(void)
40292 {
40293- return atomic_inc_return(&pci_indexes) - 1;
40294+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
40295 }
40296 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
40297
40298diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
40299index 24d877f..4e30133 100644
40300--- a/drivers/edac/edac_pci_sysfs.c
40301+++ b/drivers/edac/edac_pci_sysfs.c
40302@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
40303 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
40304 static int edac_pci_poll_msec = 1000; /* one second workq period */
40305
40306-static atomic_t pci_parity_count = ATOMIC_INIT(0);
40307-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
40308+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40309+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40310
40311 static struct kobject *edac_pci_top_main_kobj;
40312 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40313@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
40314 void *value;
40315 ssize_t(*show) (void *, char *);
40316 ssize_t(*store) (void *, const char *, size_t);
40317-};
40318+} __do_const;
40319
40320 /* Set of show/store abstract level functions for PCI Parity object */
40321 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40322@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40323 edac_printk(KERN_CRIT, EDAC_PCI,
40324 "Signaled System Error on %s\n",
40325 pci_name(dev));
40326- atomic_inc(&pci_nonparity_count);
40327+ atomic_inc_unchecked(&pci_nonparity_count);
40328 }
40329
40330 if (status & (PCI_STATUS_PARITY)) {
40331@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40332 "Master Data Parity Error on %s\n",
40333 pci_name(dev));
40334
40335- atomic_inc(&pci_parity_count);
40336+ atomic_inc_unchecked(&pci_parity_count);
40337 }
40338
40339 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40340@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40341 "Detected Parity Error on %s\n",
40342 pci_name(dev));
40343
40344- atomic_inc(&pci_parity_count);
40345+ atomic_inc_unchecked(&pci_parity_count);
40346 }
40347 }
40348
40349@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40350 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40351 "Signaled System Error on %s\n",
40352 pci_name(dev));
40353- atomic_inc(&pci_nonparity_count);
40354+ atomic_inc_unchecked(&pci_nonparity_count);
40355 }
40356
40357 if (status & (PCI_STATUS_PARITY)) {
40358@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40359 "Master Data Parity Error on "
40360 "%s\n", pci_name(dev));
40361
40362- atomic_inc(&pci_parity_count);
40363+ atomic_inc_unchecked(&pci_parity_count);
40364 }
40365
40366 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40367@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40368 "Detected Parity Error on %s\n",
40369 pci_name(dev));
40370
40371- atomic_inc(&pci_parity_count);
40372+ atomic_inc_unchecked(&pci_parity_count);
40373 }
40374 }
40375 }
40376@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40377 if (!check_pci_errors)
40378 return;
40379
40380- before_count = atomic_read(&pci_parity_count);
40381+ before_count = atomic_read_unchecked(&pci_parity_count);
40382
40383 /* scan all PCI devices looking for a Parity Error on devices and
40384 * bridges.
40385@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40386 /* Only if operator has selected panic on PCI Error */
40387 if (edac_pci_get_panic_on_pe()) {
40388 /* If the count is different 'after' from 'before' */
40389- if (before_count != atomic_read(&pci_parity_count))
40390+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40391 panic("EDAC: PCI Parity Error");
40392 }
40393 }
40394diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40395index c2359a1..8bd119d 100644
40396--- a/drivers/edac/mce_amd.h
40397+++ b/drivers/edac/mce_amd.h
40398@@ -74,7 +74,7 @@ struct amd_decoder_ops {
40399 bool (*mc0_mce)(u16, u8);
40400 bool (*mc1_mce)(u16, u8);
40401 bool (*mc2_mce)(u16, u8);
40402-};
40403+} __no_const;
40404
40405 void amd_report_gart_errors(bool);
40406 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40407diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40408index 57ea7f4..af06b76 100644
40409--- a/drivers/firewire/core-card.c
40410+++ b/drivers/firewire/core-card.c
40411@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40412 const struct fw_card_driver *driver,
40413 struct device *device)
40414 {
40415- static atomic_t index = ATOMIC_INIT(-1);
40416+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40417
40418- card->index = atomic_inc_return(&index);
40419+ card->index = atomic_inc_return_unchecked(&index);
40420 card->driver = driver;
40421 card->device = device;
40422 card->current_tlabel = 0;
40423@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40424
40425 void fw_core_remove_card(struct fw_card *card)
40426 {
40427- struct fw_card_driver dummy_driver = dummy_driver_template;
40428+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40429
40430 card->driver->update_phy_reg(card, 4,
40431 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40432diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40433index f9e3aee..269dbdb 100644
40434--- a/drivers/firewire/core-device.c
40435+++ b/drivers/firewire/core-device.c
40436@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40437 struct config_rom_attribute {
40438 struct device_attribute attr;
40439 u32 key;
40440-};
40441+} __do_const;
40442
40443 static ssize_t show_immediate(struct device *dev,
40444 struct device_attribute *dattr, char *buf)
40445diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40446index d6a09b9..18e90dd 100644
40447--- a/drivers/firewire/core-transaction.c
40448+++ b/drivers/firewire/core-transaction.c
40449@@ -38,6 +38,7 @@
40450 #include <linux/timer.h>
40451 #include <linux/types.h>
40452 #include <linux/workqueue.h>
40453+#include <linux/sched.h>
40454
40455 #include <asm/byteorder.h>
40456
40457diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40458index e1480ff6..1a429bd 100644
40459--- a/drivers/firewire/core.h
40460+++ b/drivers/firewire/core.h
40461@@ -111,6 +111,7 @@ struct fw_card_driver {
40462
40463 int (*stop_iso)(struct fw_iso_context *ctx);
40464 };
40465+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40466
40467 void fw_card_initialize(struct fw_card *card,
40468 const struct fw_card_driver *driver, struct device *device);
40469diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40470index f51d376..b118e40 100644
40471--- a/drivers/firewire/ohci.c
40472+++ b/drivers/firewire/ohci.c
40473@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
40474 be32_to_cpu(ohci->next_header));
40475 }
40476
40477+#ifndef CONFIG_GRKERNSEC
40478 if (param_remote_dma) {
40479 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40480 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40481 }
40482+#endif
40483
40484 spin_unlock_irq(&ohci->lock);
40485
40486@@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40487 unsigned long flags;
40488 int n, ret = 0;
40489
40490+#ifndef CONFIG_GRKERNSEC
40491 if (param_remote_dma)
40492 return 0;
40493+#endif
40494
40495 /*
40496 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40497diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40498index 94a58a0..f5eba42 100644
40499--- a/drivers/firmware/dmi-id.c
40500+++ b/drivers/firmware/dmi-id.c
40501@@ -16,7 +16,7 @@
40502 struct dmi_device_attribute{
40503 struct device_attribute dev_attr;
40504 int field;
40505-};
40506+} __do_const;
40507 #define to_dmi_dev_attr(_dev_attr) \
40508 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40509
40510diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40511index 2eebd28b..4261350 100644
40512--- a/drivers/firmware/dmi_scan.c
40513+++ b/drivers/firmware/dmi_scan.c
40514@@ -893,7 +893,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40515 if (buf == NULL)
40516 return -1;
40517
40518- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40519+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40520
40521 dmi_unmap(buf);
40522 return 0;
40523diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40524index 4fd9961..52d60ce 100644
40525--- a/drivers/firmware/efi/cper.c
40526+++ b/drivers/firmware/efi/cper.c
40527@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40528 */
40529 u64 cper_next_record_id(void)
40530 {
40531- static atomic64_t seq;
40532+ static atomic64_unchecked_t seq;
40533
40534- if (!atomic64_read(&seq))
40535- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40536+ if (!atomic64_read_unchecked(&seq))
40537+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40538
40539- return atomic64_inc_return(&seq);
40540+ return atomic64_inc_return_unchecked(&seq);
40541 }
40542 EXPORT_SYMBOL_GPL(cper_next_record_id);
40543
40544diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40545index 3061bb8..92b5fcc 100644
40546--- a/drivers/firmware/efi/efi.c
40547+++ b/drivers/firmware/efi/efi.c
40548@@ -160,14 +160,16 @@ static struct attribute_group efi_subsys_attr_group = {
40549 };
40550
40551 static struct efivars generic_efivars;
40552-static struct efivar_operations generic_ops;
40553+static efivar_operations_no_const generic_ops __read_only;
40554
40555 static int generic_ops_register(void)
40556 {
40557- generic_ops.get_variable = efi.get_variable;
40558- generic_ops.set_variable = efi.set_variable;
40559- generic_ops.get_next_variable = efi.get_next_variable;
40560- generic_ops.query_variable_store = efi_query_variable_store;
40561+ pax_open_kernel();
40562+ *(void **)&generic_ops.get_variable = efi.get_variable;
40563+ *(void **)&generic_ops.set_variable = efi.set_variable;
40564+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40565+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40566+ pax_close_kernel();
40567
40568 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40569 }
40570diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40571index 7b2e049..a253334 100644
40572--- a/drivers/firmware/efi/efivars.c
40573+++ b/drivers/firmware/efi/efivars.c
40574@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40575 static int
40576 create_efivars_bin_attributes(void)
40577 {
40578- struct bin_attribute *attr;
40579+ bin_attribute_no_const *attr;
40580 int error;
40581
40582 /* new_var */
40583diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
40584index 87b8e3b..c4afb35 100644
40585--- a/drivers/firmware/efi/runtime-map.c
40586+++ b/drivers/firmware/efi/runtime-map.c
40587@@ -97,7 +97,7 @@ static void map_release(struct kobject *kobj)
40588 kfree(entry);
40589 }
40590
40591-static struct kobj_type __refdata map_ktype = {
40592+static const struct kobj_type __refconst map_ktype = {
40593 .sysfs_ops = &map_attr_ops,
40594 .default_attrs = def_attrs,
40595 .release = map_release,
40596diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
40597index f1ab05e..ab51228 100644
40598--- a/drivers/firmware/google/gsmi.c
40599+++ b/drivers/firmware/google/gsmi.c
40600@@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
40601 return local_hash_64(input, 32);
40602 }
40603
40604-static struct dmi_system_id gsmi_dmi_table[] __initdata = {
40605+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
40606 {
40607 .ident = "Google Board",
40608 .matches = {
40609diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40610index 2f569aa..26e4f39 100644
40611--- a/drivers/firmware/google/memconsole.c
40612+++ b/drivers/firmware/google/memconsole.c
40613@@ -136,7 +136,7 @@ static bool __init found_memconsole(void)
40614 return false;
40615 }
40616
40617-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
40618+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
40619 {
40620 .ident = "Google Board",
40621 .matches = {
40622@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40623 if (!found_memconsole())
40624 return -ENODEV;
40625
40626- memconsole_bin_attr.size = memconsole_length;
40627+ pax_open_kernel();
40628+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40629+ pax_close_kernel();
40630+
40631 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40632 }
40633
40634diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
40635index cc016c61..d35279e 100644
40636--- a/drivers/firmware/memmap.c
40637+++ b/drivers/firmware/memmap.c
40638@@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
40639 kfree(entry);
40640 }
40641
40642-static struct kobj_type __refdata memmap_ktype = {
40643+static const struct kobj_type __refconst memmap_ktype = {
40644 .release = release_firmware_map_entry,
40645 .sysfs_ops = &memmap_attr_ops,
40646 .default_attrs = def_attrs,
40647diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40648index 3cfcfc6..09d6f117 100644
40649--- a/drivers/gpio/gpio-em.c
40650+++ b/drivers/gpio/gpio-em.c
40651@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40652 struct em_gio_priv *p;
40653 struct resource *io[2], *irq[2];
40654 struct gpio_chip *gpio_chip;
40655- struct irq_chip *irq_chip;
40656+ irq_chip_no_const *irq_chip;
40657 const char *name = dev_name(&pdev->dev);
40658 int ret;
40659
40660diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40661index 7818cd1..1be40e5 100644
40662--- a/drivers/gpio/gpio-ich.c
40663+++ b/drivers/gpio/gpio-ich.c
40664@@ -94,7 +94,7 @@ struct ichx_desc {
40665 * this option allows driver caching written output values
40666 */
40667 bool use_outlvl_cache;
40668-};
40669+} __do_const;
40670
40671 static struct {
40672 spinlock_t lock;
40673diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40674index f476ae2..05e1bdd 100644
40675--- a/drivers/gpio/gpio-omap.c
40676+++ b/drivers/gpio/gpio-omap.c
40677@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40678 const struct omap_gpio_platform_data *pdata;
40679 struct resource *res;
40680 struct gpio_bank *bank;
40681- struct irq_chip *irqc;
40682+ irq_chip_no_const *irqc;
40683 int ret;
40684
40685 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40686diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40687index c49522e..9a7ee54 100644
40688--- a/drivers/gpio/gpio-rcar.c
40689+++ b/drivers/gpio/gpio-rcar.c
40690@@ -348,7 +348,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40691 struct gpio_rcar_priv *p;
40692 struct resource *io, *irq;
40693 struct gpio_chip *gpio_chip;
40694- struct irq_chip *irq_chip;
40695+ irq_chip_no_const *irq_chip;
40696 struct device *dev = &pdev->dev;
40697 const char *name = dev_name(dev);
40698 int ret;
40699diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40700index c1caa45..f0f97d2 100644
40701--- a/drivers/gpio/gpio-vr41xx.c
40702+++ b/drivers/gpio/gpio-vr41xx.c
40703@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40704 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40705 maskl, pendl, maskh, pendh);
40706
40707- atomic_inc(&irq_err_count);
40708+ atomic_inc_unchecked(&irq_err_count);
40709
40710 return -EINVAL;
40711 }
40712diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40713index 1ca9295..9f3d481 100644
40714--- a/drivers/gpio/gpiolib.c
40715+++ b/drivers/gpio/gpiolib.c
40716@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40717 }
40718
40719 if (gpiochip->irqchip) {
40720- gpiochip->irqchip->irq_request_resources = NULL;
40721- gpiochip->irqchip->irq_release_resources = NULL;
40722+ pax_open_kernel();
40723+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40724+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40725+ pax_close_kernel();
40726 gpiochip->irqchip = NULL;
40727 }
40728 }
40729@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40730 gpiochip->irqchip = NULL;
40731 return -EINVAL;
40732 }
40733- irqchip->irq_request_resources = gpiochip_irq_reqres;
40734- irqchip->irq_release_resources = gpiochip_irq_relres;
40735+
40736+ pax_open_kernel();
40737+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40738+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40739+ pax_close_kernel();
40740
40741 /*
40742 * Prepare the mapping since the irqchip shall be orthogonal to
40743diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40744index 488f51d..301d462 100644
40745--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40746+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40747@@ -118,7 +118,7 @@ struct device_queue_manager_ops {
40748 enum cache_policy alternate_policy,
40749 void __user *alternate_aperture_base,
40750 uint64_t alternate_aperture_size);
40751-};
40752+} __no_const;
40753
40754 /**
40755 * struct device_queue_manager
40756diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40757index 5940531..a75b0e5 100644
40758--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40759+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40760@@ -62,7 +62,7 @@ struct kernel_queue_ops {
40761
40762 void (*submit_packet)(struct kernel_queue *kq);
40763 void (*rollback_packet)(struct kernel_queue *kq);
40764-};
40765+} __no_const;
40766
40767 struct kernel_queue {
40768 struct kernel_queue_ops ops;
40769diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
40770index 9b23525..65f4110 100644
40771--- a/drivers/gpu/drm/drm_context.c
40772+++ b/drivers/gpu/drm/drm_context.c
40773@@ -53,6 +53,9 @@ struct drm_ctx_list {
40774 */
40775 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
40776 {
40777+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40778+ return;
40779+
40780 mutex_lock(&dev->struct_mutex);
40781 idr_remove(&dev->ctx_idr, ctx_handle);
40782 mutex_unlock(&dev->struct_mutex);
40783@@ -87,6 +90,9 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
40784 */
40785 int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40786 {
40787+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40788+ return -EINVAL;
40789+
40790 idr_init(&dev->ctx_idr);
40791 return 0;
40792 }
40793@@ -101,6 +107,9 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40794 */
40795 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
40796 {
40797+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40798+ return;
40799+
40800 mutex_lock(&dev->struct_mutex);
40801 idr_destroy(&dev->ctx_idr);
40802 mutex_unlock(&dev->struct_mutex);
40803@@ -119,11 +128,14 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
40804 {
40805 struct drm_ctx_list *pos, *tmp;
40806
40807+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40808+ return;
40809+
40810 mutex_lock(&dev->ctxlist_mutex);
40811
40812 list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
40813 if (pos->tag == file &&
40814- pos->handle != DRM_KERNEL_CONTEXT) {
40815+ _DRM_LOCKING_CONTEXT(pos->handle) != DRM_KERNEL_CONTEXT) {
40816 if (dev->driver->context_dtor)
40817 dev->driver->context_dtor(dev, pos->handle);
40818
40819@@ -161,6 +173,9 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
40820 struct drm_local_map *map;
40821 struct drm_map_list *_entry;
40822
40823+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40824+ return -EINVAL;
40825+
40826 mutex_lock(&dev->struct_mutex);
40827
40828 map = idr_find(&dev->ctx_idr, request->ctx_id);
40829@@ -205,6 +220,9 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
40830 struct drm_local_map *map = NULL;
40831 struct drm_map_list *r_list = NULL;
40832
40833+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40834+ return -EINVAL;
40835+
40836 mutex_lock(&dev->struct_mutex);
40837 list_for_each_entry(r_list, &dev->maplist, head) {
40838 if (r_list->map
40839@@ -277,7 +295,13 @@ static int drm_context_switch_complete(struct drm_device *dev,
40840 {
40841 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
40842
40843- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40844+ if (file_priv->master->lock.hw_lock == NULL) {
40845+ DRM_ERROR(
40846+ "Device has been unregistered. Hard exit. Process %d\n",
40847+ task_pid_nr(current));
40848+ send_sig(SIGTERM, current, 0);
40849+ return -EPERM;
40850+ } else if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40851 DRM_ERROR("Lock isn't held after context switch\n");
40852 }
40853
40854@@ -305,6 +329,9 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
40855 struct drm_ctx ctx;
40856 int i;
40857
40858+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40859+ return -EINVAL;
40860+
40861 if (res->count >= DRM_RESERVED_CONTEXTS) {
40862 memset(&ctx, 0, sizeof(ctx));
40863 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
40864@@ -335,8 +362,11 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
40865 struct drm_ctx_list *ctx_entry;
40866 struct drm_ctx *ctx = data;
40867
40868+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40869+ return -EINVAL;
40870+
40871 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40872- if (ctx->handle == DRM_KERNEL_CONTEXT) {
40873+ if (_DRM_LOCKING_CONTEXT(ctx->handle) == DRM_KERNEL_CONTEXT) {
40874 /* Skip kernel's context and get a new one. */
40875 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40876 }
40877@@ -378,6 +408,9 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
40878 {
40879 struct drm_ctx *ctx = data;
40880
40881+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40882+ return -EINVAL;
40883+
40884 /* This is 0, because we don't handle any context flags */
40885 ctx->flags = 0;
40886
40887@@ -400,6 +433,9 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
40888 {
40889 struct drm_ctx *ctx = data;
40890
40891+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40892+ return -EINVAL;
40893+
40894 DRM_DEBUG("%d\n", ctx->handle);
40895 return drm_context_switch(dev, dev->last_context, ctx->handle);
40896 }
40897@@ -420,6 +456,9 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
40898 {
40899 struct drm_ctx *ctx = data;
40900
40901+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40902+ return -EINVAL;
40903+
40904 DRM_DEBUG("%d\n", ctx->handle);
40905 drm_context_switch_complete(dev, file_priv, ctx->handle);
40906
40907@@ -442,8 +481,11 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
40908 {
40909 struct drm_ctx *ctx = data;
40910
40911+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40912+ return -EINVAL;
40913+
40914 DRM_DEBUG("%d\n", ctx->handle);
40915- if (ctx->handle != DRM_KERNEL_CONTEXT) {
40916+ if (_DRM_LOCKING_CONTEXT(ctx->handle) != DRM_KERNEL_CONTEXT) {
40917 if (dev->driver->context_dtor)
40918 dev->driver->context_dtor(dev, ctx->handle);
40919 drm_legacy_ctxbitmap_free(dev, ctx->handle);
40920diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40921index b6f076b..2918de2 100644
40922--- a/drivers/gpu/drm/drm_crtc.c
40923+++ b/drivers/gpu/drm/drm_crtc.c
40924@@ -4118,7 +4118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40925 goto done;
40926 }
40927
40928- if (copy_to_user(&enum_ptr[copied].name,
40929+ if (copy_to_user(enum_ptr[copied].name,
40930 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40931 ret = -EFAULT;
40932 goto done;
40933diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40934index d512134..a80a8e4 100644
40935--- a/drivers/gpu/drm/drm_drv.c
40936+++ b/drivers/gpu/drm/drm_drv.c
40937@@ -448,7 +448,7 @@ void drm_unplug_dev(struct drm_device *dev)
40938
40939 drm_device_set_unplugged(dev);
40940
40941- if (dev->open_count == 0) {
40942+ if (local_read(&dev->open_count) == 0) {
40943 drm_put_dev(dev);
40944 }
40945 mutex_unlock(&drm_global_mutex);
40946@@ -596,10 +596,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
40947 if (drm_ht_create(&dev->map_hash, 12))
40948 goto err_minors;
40949
40950- ret = drm_legacy_ctxbitmap_init(dev);
40951- if (ret) {
40952- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
40953- goto err_ht;
40954+ if (drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT)) {
40955+ ret = drm_legacy_ctxbitmap_init(dev);
40956+ if (ret) {
40957+ DRM_ERROR(
40958+ "Cannot allocate memory for context bitmap.\n");
40959+ goto err_ht;
40960+ }
40961 }
40962
40963 if (drm_core_check_feature(dev, DRIVER_GEM)) {
40964diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40965index 076dd60..e4a4ba7 100644
40966--- a/drivers/gpu/drm/drm_fops.c
40967+++ b/drivers/gpu/drm/drm_fops.c
40968@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40969 return PTR_ERR(minor);
40970
40971 dev = minor->dev;
40972- if (!dev->open_count++)
40973+ if (local_inc_return(&dev->open_count) == 1)
40974 need_setup = 1;
40975
40976 /* share address_space across all char-devs of a single device */
40977@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40978 return 0;
40979
40980 err_undo:
40981- dev->open_count--;
40982+ local_dec(&dev->open_count);
40983 drm_minor_release(minor);
40984 return retcode;
40985 }
40986@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40987
40988 mutex_lock(&drm_global_mutex);
40989
40990- DRM_DEBUG("open_count = %d\n", dev->open_count);
40991+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40992
40993 mutex_lock(&dev->struct_mutex);
40994 list_del(&file_priv->lhead);
40995@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40996 * Begin inline drm_release
40997 */
40998
40999- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
41000+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
41001 task_pid_nr(current),
41002 (long)old_encode_dev(file_priv->minor->kdev->devt),
41003- dev->open_count);
41004+ local_read(&dev->open_count));
41005
41006 /* Release any auth tokens that might point to this file_priv,
41007 (do that under the drm_global_mutex) */
41008@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
41009 * End inline drm_release
41010 */
41011
41012- if (!--dev->open_count) {
41013+ if (local_dec_and_test(&dev->open_count)) {
41014 retcode = drm_lastclose(dev);
41015 if (drm_device_is_unplugged(dev))
41016 drm_put_dev(dev);
41017diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
41018index 3d2e91c..d31c4c9 100644
41019--- a/drivers/gpu/drm/drm_global.c
41020+++ b/drivers/gpu/drm/drm_global.c
41021@@ -36,7 +36,7 @@
41022 struct drm_global_item {
41023 struct mutex mutex;
41024 void *object;
41025- int refcount;
41026+ atomic_t refcount;
41027 };
41028
41029 static struct drm_global_item glob[DRM_GLOBAL_NUM];
41030@@ -49,7 +49,7 @@ void drm_global_init(void)
41031 struct drm_global_item *item = &glob[i];
41032 mutex_init(&item->mutex);
41033 item->object = NULL;
41034- item->refcount = 0;
41035+ atomic_set(&item->refcount, 0);
41036 }
41037 }
41038
41039@@ -59,7 +59,7 @@ void drm_global_release(void)
41040 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
41041 struct drm_global_item *item = &glob[i];
41042 BUG_ON(item->object != NULL);
41043- BUG_ON(item->refcount != 0);
41044+ BUG_ON(atomic_read(&item->refcount) != 0);
41045 }
41046 }
41047
41048@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41049 struct drm_global_item *item = &glob[ref->global_type];
41050
41051 mutex_lock(&item->mutex);
41052- if (item->refcount == 0) {
41053+ if (atomic_read(&item->refcount) == 0) {
41054 item->object = kzalloc(ref->size, GFP_KERNEL);
41055 if (unlikely(item->object == NULL)) {
41056 ret = -ENOMEM;
41057@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41058 goto out_err;
41059
41060 }
41061- ++item->refcount;
41062+ atomic_inc(&item->refcount);
41063 ref->object = item->object;
41064 mutex_unlock(&item->mutex);
41065 return 0;
41066@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
41067 struct drm_global_item *item = &glob[ref->global_type];
41068
41069 mutex_lock(&item->mutex);
41070- BUG_ON(item->refcount == 0);
41071+ BUG_ON(atomic_read(&item->refcount) == 0);
41072 BUG_ON(ref->object != item->object);
41073- if (--item->refcount == 0) {
41074+ if (atomic_dec_and_test(&item->refcount)) {
41075 ref->release(ref);
41076 item->object = NULL;
41077 }
41078diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
41079index f1b32f9..394f791 100644
41080--- a/drivers/gpu/drm/drm_info.c
41081+++ b/drivers/gpu/drm/drm_info.c
41082@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
41083 struct drm_local_map *map;
41084 struct drm_map_list *r_list;
41085
41086- /* Hardcoded from _DRM_FRAME_BUFFER,
41087- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
41088- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
41089- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
41090+ static const char * const types[] = {
41091+ [_DRM_FRAME_BUFFER] = "FB",
41092+ [_DRM_REGISTERS] = "REG",
41093+ [_DRM_SHM] = "SHM",
41094+ [_DRM_AGP] = "AGP",
41095+ [_DRM_SCATTER_GATHER] = "SG",
41096+ [_DRM_CONSISTENT] = "PCI"};
41097 const char *type;
41098 int i;
41099
41100@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
41101 map = r_list->map;
41102 if (!map)
41103 continue;
41104- if (map->type < 0 || map->type > 5)
41105+ if (map->type >= ARRAY_SIZE(types))
41106 type = "??";
41107 else
41108 type = types[map->type];
41109diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
41110index 2f4c4343..dd12cd2 100644
41111--- a/drivers/gpu/drm/drm_ioc32.c
41112+++ b/drivers/gpu/drm/drm_ioc32.c
41113@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
41114 request = compat_alloc_user_space(nbytes);
41115 if (!access_ok(VERIFY_WRITE, request, nbytes))
41116 return -EFAULT;
41117- list = (struct drm_buf_desc *) (request + 1);
41118+ list = (struct drm_buf_desc __user *) (request + 1);
41119
41120 if (__put_user(count, &request->count)
41121 || __put_user(list, &request->list))
41122@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
41123 request = compat_alloc_user_space(nbytes);
41124 if (!access_ok(VERIFY_WRITE, request, nbytes))
41125 return -EFAULT;
41126- list = (struct drm_buf_pub *) (request + 1);
41127+ list = (struct drm_buf_pub __user *) (request + 1);
41128
41129 if (__put_user(count, &request->count)
41130 || __put_user(list, &request->list))
41131@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
41132 return 0;
41133 }
41134
41135-drm_ioctl_compat_t *drm_compat_ioctls[] = {
41136+drm_ioctl_compat_t drm_compat_ioctls[] = {
41137 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
41138 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
41139 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
41140@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
41141 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41142 {
41143 unsigned int nr = DRM_IOCTL_NR(cmd);
41144- drm_ioctl_compat_t *fn;
41145 int ret;
41146
41147 /* Assume that ioctls without an explicit compat routine will just
41148@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41149 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
41150 return drm_ioctl(filp, cmd, arg);
41151
41152- fn = drm_compat_ioctls[nr];
41153-
41154- if (fn != NULL)
41155- ret = (*fn) (filp, cmd, arg);
41156+ if (drm_compat_ioctls[nr] != NULL)
41157+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
41158 else
41159 ret = drm_ioctl(filp, cmd, arg);
41160
41161diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
41162index 3785d66..1c489ef 100644
41163--- a/drivers/gpu/drm/drm_ioctl.c
41164+++ b/drivers/gpu/drm/drm_ioctl.c
41165@@ -655,7 +655,7 @@ long drm_ioctl(struct file *filp,
41166 struct drm_file *file_priv = filp->private_data;
41167 struct drm_device *dev;
41168 const struct drm_ioctl_desc *ioctl = NULL;
41169- drm_ioctl_t *func;
41170+ drm_ioctl_no_const_t func;
41171 unsigned int nr = DRM_IOCTL_NR(cmd);
41172 int retcode = -EINVAL;
41173 char stack_kdata[128];
41174diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
41175index f861361..b61d4c7 100644
41176--- a/drivers/gpu/drm/drm_lock.c
41177+++ b/drivers/gpu/drm/drm_lock.c
41178@@ -61,9 +61,12 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
41179 struct drm_master *master = file_priv->master;
41180 int ret = 0;
41181
41182+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
41183+ return -EINVAL;
41184+
41185 ++file_priv->lock_count;
41186
41187- if (lock->context == DRM_KERNEL_CONTEXT) {
41188+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
41189 DRM_ERROR("Process %d using kernel context %d\n",
41190 task_pid_nr(current), lock->context);
41191 return -EINVAL;
41192@@ -153,12 +156,23 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
41193 struct drm_lock *lock = data;
41194 struct drm_master *master = file_priv->master;
41195
41196- if (lock->context == DRM_KERNEL_CONTEXT) {
41197+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
41198+ return -EINVAL;
41199+
41200+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
41201 DRM_ERROR("Process %d using kernel context %d\n",
41202 task_pid_nr(current), lock->context);
41203 return -EINVAL;
41204 }
41205
41206+ if (!master->lock.hw_lock) {
41207+ DRM_ERROR(
41208+ "Device has been unregistered. Hard exit. Process %d\n",
41209+ task_pid_nr(current));
41210+ send_sig(SIGTERM, current, 0);
41211+ return -EPERM;
41212+ }
41213+
41214 if (drm_legacy_lock_free(&master->lock, lock->context)) {
41215 /* FIXME: Should really bail out here. */
41216 }
41217diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
41218index d4813e0..6c1ab4d 100644
41219--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
41220+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
41221@@ -825,10 +825,16 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
41222 u32 pipeconf_reg = PIPEACONF;
41223 u32 dspcntr_reg = DSPACNTR;
41224
41225- u32 pipeconf = dev_priv->pipeconf[pipe];
41226- u32 dspcntr = dev_priv->dspcntr[pipe];
41227+ u32 pipeconf;
41228+ u32 dspcntr;
41229 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
41230
41231+ if (pipe == -1)
41232+ return;
41233+
41234+ pipeconf = dev_priv->pipeconf[pipe];
41235+ dspcntr = dev_priv->dspcntr[pipe];
41236+
41237 if (pipe) {
41238 pipeconf_reg = PIPECCONF;
41239 dspcntr_reg = DSPCCNTR;
41240diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
41241index 93ec5dc..82acbaf 100644
41242--- a/drivers/gpu/drm/i810/i810_drv.h
41243+++ b/drivers/gpu/drm/i810/i810_drv.h
41244@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
41245 int page_flipping;
41246
41247 wait_queue_head_t irq_queue;
41248- atomic_t irq_received;
41249- atomic_t irq_emitted;
41250+ atomic_unchecked_t irq_received;
41251+ atomic_unchecked_t irq_emitted;
41252
41253 int front_offset;
41254 } drm_i810_private_t;
41255diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
41256index 1a46787..7fb387c 100644
41257--- a/drivers/gpu/drm/i915/i915_dma.c
41258+++ b/drivers/gpu/drm/i915/i915_dma.c
41259@@ -149,6 +149,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
41260 case I915_PARAM_MMAP_VERSION:
41261 value = 1;
41262 break;
41263+ case I915_PARAM_HAS_LEGACY_CONTEXT:
41264+ value = drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT);
41265+ break;
41266 default:
41267 DRM_DEBUG("Unknown parameter %d\n", param->param);
41268 return -EINVAL;
41269@@ -362,7 +365,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
41270 * locking inversion with the driver load path. And the access here is
41271 * completely racy anyway. So don't bother with locking for now.
41272 */
41273- return dev->open_count == 0;
41274+ return local_read(&dev->open_count) == 0;
41275 }
41276
41277 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
41278diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41279index 38a7425..5322b16 100644
41280--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41281+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41282@@ -872,12 +872,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
41283 static int
41284 validate_exec_list(struct drm_device *dev,
41285 struct drm_i915_gem_exec_object2 *exec,
41286- int count)
41287+ unsigned int count)
41288 {
41289 unsigned relocs_total = 0;
41290 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
41291 unsigned invalid_flags;
41292- int i;
41293+ unsigned int i;
41294
41295 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
41296 if (USES_FULL_PPGTT(dev))
41297diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
41298index 176de63..b50b66a 100644
41299--- a/drivers/gpu/drm/i915/i915_ioc32.c
41300+++ b/drivers/gpu/drm/i915/i915_ioc32.c
41301@@ -62,7 +62,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
41302 || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
41303 || __put_user(batchbuffer32.num_cliprects,
41304 &batchbuffer->num_cliprects)
41305- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
41306+ || __put_user((struct drm_clip_rect __user *)(unsigned long)batchbuffer32.cliprects,
41307 &batchbuffer->cliprects))
41308 return -EFAULT;
41309
41310@@ -91,13 +91,13 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
41311
41312 cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
41313 if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
41314- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
41315+ || __put_user((char __user *)(unsigned long)cmdbuffer32.buf,
41316 &cmdbuffer->buf)
41317 || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
41318 || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
41319 || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
41320 || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
41321- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
41322+ || __put_user((struct drm_clip_rect __user *)(unsigned long)cmdbuffer32.cliprects,
41323 &cmdbuffer->cliprects))
41324 return -EFAULT;
41325
41326@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
41327 (unsigned long)request);
41328 }
41329
41330-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41331+static drm_ioctl_compat_t i915_compat_ioctls[] = {
41332 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
41333 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
41334 [DRM_I915_GETPARAM] = compat_i915_getparam,
41335@@ -201,17 +201,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41336 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41337 {
41338 unsigned int nr = DRM_IOCTL_NR(cmd);
41339- drm_ioctl_compat_t *fn = NULL;
41340 int ret;
41341
41342 if (nr < DRM_COMMAND_BASE)
41343 return drm_compat_ioctl(filp, cmd, arg);
41344
41345- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
41346- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41347-
41348- if (fn != NULL)
41349- ret = (*fn) (filp, cmd, arg);
41350+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE])
41351+ ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE])(filp, cmd, arg);
41352 else
41353 ret = drm_ioctl(filp, cmd, arg);
41354
41355diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
41356index f75173c..f283e45 100644
41357--- a/drivers/gpu/drm/i915/intel_display.c
41358+++ b/drivers/gpu/drm/i915/intel_display.c
41359@@ -13056,13 +13056,13 @@ struct intel_quirk {
41360 int subsystem_vendor;
41361 int subsystem_device;
41362 void (*hook)(struct drm_device *dev);
41363-};
41364+} __do_const;
41365
41366 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
41367 struct intel_dmi_quirk {
41368 void (*hook)(struct drm_device *dev);
41369 const struct dmi_system_id (*dmi_id_list)[];
41370-};
41371+} __do_const;
41372
41373 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41374 {
41375@@ -13070,18 +13070,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41376 return 1;
41377 }
41378
41379-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41380+static const struct dmi_system_id intel_dmi_quirks_table[] = {
41381 {
41382- .dmi_id_list = &(const struct dmi_system_id[]) {
41383- {
41384- .callback = intel_dmi_reverse_brightness,
41385- .ident = "NCR Corporation",
41386- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41387- DMI_MATCH(DMI_PRODUCT_NAME, ""),
41388- },
41389- },
41390- { } /* terminating entry */
41391+ .callback = intel_dmi_reverse_brightness,
41392+ .ident = "NCR Corporation",
41393+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41394+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
41395 },
41396+ },
41397+ { } /* terminating entry */
41398+};
41399+
41400+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41401+ {
41402+ .dmi_id_list = &intel_dmi_quirks_table,
41403 .hook = quirk_invert_brightness,
41404 },
41405 };
41406diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
41407index a002f53..0d60514 100644
41408--- a/drivers/gpu/drm/imx/imx-drm-core.c
41409+++ b/drivers/gpu/drm/imx/imx-drm-core.c
41410@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
41411 if (imxdrm->pipes >= MAX_CRTC)
41412 return -EINVAL;
41413
41414- if (imxdrm->drm->open_count)
41415+ if (local_read(&imxdrm->drm->open_count))
41416 return -EBUSY;
41417
41418 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
41419diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
41420index b4a20149..219ab78 100644
41421--- a/drivers/gpu/drm/mga/mga_drv.h
41422+++ b/drivers/gpu/drm/mga/mga_drv.h
41423@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
41424 u32 clear_cmd;
41425 u32 maccess;
41426
41427- atomic_t vbl_received; /**< Number of vblanks received. */
41428+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
41429 wait_queue_head_t fence_queue;
41430- atomic_t last_fence_retired;
41431+ atomic_unchecked_t last_fence_retired;
41432 u32 next_fence_to_post;
41433
41434 unsigned int fb_cpp;
41435diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
41436index 729bfd5..14bae78 100644
41437--- a/drivers/gpu/drm/mga/mga_ioc32.c
41438+++ b/drivers/gpu/drm/mga/mga_ioc32.c
41439@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
41440 return 0;
41441 }
41442
41443-drm_ioctl_compat_t *mga_compat_ioctls[] = {
41444+drm_ioctl_compat_t mga_compat_ioctls[] = {
41445 [DRM_MGA_INIT] = compat_mga_init,
41446 [DRM_MGA_GETPARAM] = compat_mga_getparam,
41447 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
41448@@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
41449 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41450 {
41451 unsigned int nr = DRM_IOCTL_NR(cmd);
41452- drm_ioctl_compat_t *fn = NULL;
41453 int ret;
41454
41455 if (nr < DRM_COMMAND_BASE)
41456 return drm_compat_ioctl(filp, cmd, arg);
41457
41458- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
41459- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41460-
41461- if (fn != NULL)
41462- ret = (*fn) (filp, cmd, arg);
41463+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE])
41464+ ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41465 else
41466 ret = drm_ioctl(filp, cmd, arg);
41467
41468diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
41469index 1b071b8..de8601a 100644
41470--- a/drivers/gpu/drm/mga/mga_irq.c
41471+++ b/drivers/gpu/drm/mga/mga_irq.c
41472@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
41473 if (crtc != 0)
41474 return 0;
41475
41476- return atomic_read(&dev_priv->vbl_received);
41477+ return atomic_read_unchecked(&dev_priv->vbl_received);
41478 }
41479
41480
41481@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41482 /* VBLANK interrupt */
41483 if (status & MGA_VLINEPEN) {
41484 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
41485- atomic_inc(&dev_priv->vbl_received);
41486+ atomic_inc_unchecked(&dev_priv->vbl_received);
41487 drm_handle_vblank(dev, 0);
41488 handled = 1;
41489 }
41490@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41491 if ((prim_start & ~0x03) != (prim_end & ~0x03))
41492 MGA_WRITE(MGA_PRIMEND, prim_end);
41493
41494- atomic_inc(&dev_priv->last_fence_retired);
41495+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
41496 wake_up(&dev_priv->fence_queue);
41497 handled = 1;
41498 }
41499@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
41500 * using fences.
41501 */
41502 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
41503- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
41504+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
41505 - *sequence) <= (1 << 23)));
41506
41507 *sequence = cur_fence;
41508diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
41509index 0190b69..60c3eaf 100644
41510--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
41511+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
41512@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
41513 struct bit_table {
41514 const char id;
41515 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
41516-};
41517+} __no_const;
41518
41519 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
41520
41521diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
41522index 8763deb..936b423 100644
41523--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
41524+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
41525@@ -940,7 +940,8 @@ static struct drm_driver
41526 driver_stub = {
41527 .driver_features =
41528 DRIVER_USE_AGP |
41529- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
41530+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
41531+ DRIVER_KMS_LEGACY_CONTEXT,
41532
41533 .load = nouveau_drm_load,
41534 .unload = nouveau_drm_unload,
41535diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
41536index fc68f09..0511d71 100644
41537--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
41538+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
41539@@ -121,7 +121,6 @@ struct nouveau_drm {
41540 struct drm_global_reference mem_global_ref;
41541 struct ttm_bo_global_ref bo_global_ref;
41542 struct ttm_bo_device bdev;
41543- atomic_t validate_sequence;
41544 int (*move)(struct nouveau_channel *,
41545 struct ttm_buffer_object *,
41546 struct ttm_mem_reg *, struct ttm_mem_reg *);
41547diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41548index 462679a..88e32a7 100644
41549--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41550+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41551@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41552 unsigned long arg)
41553 {
41554 unsigned int nr = DRM_IOCTL_NR(cmd);
41555- drm_ioctl_compat_t *fn = NULL;
41556+ drm_ioctl_compat_t fn = NULL;
41557 int ret;
41558
41559 if (nr < DRM_COMMAND_BASE)
41560diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41561index 273e501..3b6c0a2 100644
41562--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41563+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41564@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41565 }
41566
41567 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41568- nouveau_vram_manager_init,
41569- nouveau_vram_manager_fini,
41570- nouveau_vram_manager_new,
41571- nouveau_vram_manager_del,
41572- nouveau_vram_manager_debug
41573+ .init = nouveau_vram_manager_init,
41574+ .takedown = nouveau_vram_manager_fini,
41575+ .get_node = nouveau_vram_manager_new,
41576+ .put_node = nouveau_vram_manager_del,
41577+ .debug = nouveau_vram_manager_debug
41578 };
41579
41580 static int
41581@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41582 }
41583
41584 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41585- nouveau_gart_manager_init,
41586- nouveau_gart_manager_fini,
41587- nouveau_gart_manager_new,
41588- nouveau_gart_manager_del,
41589- nouveau_gart_manager_debug
41590+ .init = nouveau_gart_manager_init,
41591+ .takedown = nouveau_gart_manager_fini,
41592+ .get_node = nouveau_gart_manager_new,
41593+ .put_node = nouveau_gart_manager_del,
41594+ .debug = nouveau_gart_manager_debug
41595 };
41596
41597 /*XXX*/
41598@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41599 }
41600
41601 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41602- nv04_gart_manager_init,
41603- nv04_gart_manager_fini,
41604- nv04_gart_manager_new,
41605- nv04_gart_manager_del,
41606- nv04_gart_manager_debug
41607+ .init = nv04_gart_manager_init,
41608+ .takedown = nv04_gart_manager_fini,
41609+ .get_node = nv04_gart_manager_new,
41610+ .put_node = nv04_gart_manager_del,
41611+ .debug = nv04_gart_manager_debug
41612 };
41613
41614 int
41615diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41616index c7592ec..dd45ebc 100644
41617--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41618+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41619@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41620 * locking inversion with the driver load path. And the access here is
41621 * completely racy anyway. So don't bother with locking for now.
41622 */
41623- return dev->open_count == 0;
41624+ return local_read(&dev->open_count) == 0;
41625 }
41626
41627 static const struct vga_switcheroo_client_ops
41628diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41629index 9782364..89bd954 100644
41630--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41631+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41632@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41633 int ret;
41634
41635 mutex_lock(&qdev->async_io_mutex);
41636- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41637+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41638 if (qdev->last_sent_io_cmd > irq_num) {
41639 if (intr)
41640 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41641- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41642+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41643 else
41644 ret = wait_event_timeout(qdev->io_cmd_event,
41645- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41646+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41647 /* 0 is timeout, just bail the "hw" has gone away */
41648 if (ret <= 0)
41649 goto out;
41650- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41651+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41652 }
41653 outb(val, addr);
41654 qdev->last_sent_io_cmd = irq_num + 1;
41655 if (intr)
41656 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41657- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41658+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41659 else
41660 ret = wait_event_timeout(qdev->io_cmd_event,
41661- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41662+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41663 out:
41664 if (ret > 0)
41665 ret = 0;
41666diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41667index 6911b8c..89d6867 100644
41668--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41669+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41670@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41671 struct drm_info_node *node = (struct drm_info_node *) m->private;
41672 struct qxl_device *qdev = node->minor->dev->dev_private;
41673
41674- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41675- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41676- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41677- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41678+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41679+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41680+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41681+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41682 seq_printf(m, "%d\n", qdev->irq_received_error);
41683 return 0;
41684 }
41685diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41686index 7c6cafe..460f542 100644
41687--- a/drivers/gpu/drm/qxl/qxl_drv.h
41688+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41689@@ -290,10 +290,10 @@ struct qxl_device {
41690 unsigned int last_sent_io_cmd;
41691
41692 /* interrupt handling */
41693- atomic_t irq_received;
41694- atomic_t irq_received_display;
41695- atomic_t irq_received_cursor;
41696- atomic_t irq_received_io_cmd;
41697+ atomic_unchecked_t irq_received;
41698+ atomic_unchecked_t irq_received_display;
41699+ atomic_unchecked_t irq_received_cursor;
41700+ atomic_unchecked_t irq_received_io_cmd;
41701 unsigned irq_received_error;
41702 wait_queue_head_t display_event;
41703 wait_queue_head_t cursor_event;
41704diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41705index b110883..dd06418 100644
41706--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41707+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41708@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41709
41710 /* TODO copy slow path code from i915 */
41711 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41712- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41713+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41714
41715 {
41716 struct qxl_drawable *draw = fb_cmd;
41717@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41718 struct drm_qxl_reloc reloc;
41719
41720 if (copy_from_user(&reloc,
41721- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41722+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41723 sizeof(reloc))) {
41724 ret = -EFAULT;
41725 goto out_free_bos;
41726@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41727
41728 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41729
41730- struct drm_qxl_command *commands =
41731- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41732+ struct drm_qxl_command __user *commands =
41733+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41734
41735- if (copy_from_user(&user_cmd, &commands[cmd_num],
41736+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41737 sizeof(user_cmd)))
41738 return -EFAULT;
41739
41740diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41741index 0bf1e20..42a7310 100644
41742--- a/drivers/gpu/drm/qxl/qxl_irq.c
41743+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41744@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41745 if (!pending)
41746 return IRQ_NONE;
41747
41748- atomic_inc(&qdev->irq_received);
41749+ atomic_inc_unchecked(&qdev->irq_received);
41750
41751 if (pending & QXL_INTERRUPT_DISPLAY) {
41752- atomic_inc(&qdev->irq_received_display);
41753+ atomic_inc_unchecked(&qdev->irq_received_display);
41754 wake_up_all(&qdev->display_event);
41755 qxl_queue_garbage_collect(qdev, false);
41756 }
41757 if (pending & QXL_INTERRUPT_CURSOR) {
41758- atomic_inc(&qdev->irq_received_cursor);
41759+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41760 wake_up_all(&qdev->cursor_event);
41761 }
41762 if (pending & QXL_INTERRUPT_IO_CMD) {
41763- atomic_inc(&qdev->irq_received_io_cmd);
41764+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41765 wake_up_all(&qdev->io_cmd_event);
41766 }
41767 if (pending & QXL_INTERRUPT_ERROR) {
41768@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41769 init_waitqueue_head(&qdev->io_cmd_event);
41770 INIT_WORK(&qdev->client_monitors_config_work,
41771 qxl_client_monitors_config_work_func);
41772- atomic_set(&qdev->irq_received, 0);
41773- atomic_set(&qdev->irq_received_display, 0);
41774- atomic_set(&qdev->irq_received_cursor, 0);
41775- atomic_set(&qdev->irq_received_io_cmd, 0);
41776+ atomic_set_unchecked(&qdev->irq_received, 0);
41777+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41778+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41779+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41780 qdev->irq_received_error = 0;
41781 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41782 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41783diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41784index 0cbc4c9..0e46686 100644
41785--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41786+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41787@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41788 }
41789 }
41790
41791-static struct vm_operations_struct qxl_ttm_vm_ops;
41792+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41793 static const struct vm_operations_struct *ttm_vm_ops;
41794
41795 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41796@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41797 return r;
41798 if (unlikely(ttm_vm_ops == NULL)) {
41799 ttm_vm_ops = vma->vm_ops;
41800+ pax_open_kernel();
41801 qxl_ttm_vm_ops = *ttm_vm_ops;
41802 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41803+ pax_close_kernel();
41804 }
41805 vma->vm_ops = &qxl_ttm_vm_ops;
41806 return 0;
41807@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41808 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41809 {
41810 #if defined(CONFIG_DEBUG_FS)
41811- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41812- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41813- unsigned i;
41814+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41815+ {
41816+ .name = "qxl_mem_mm",
41817+ .show = &qxl_mm_dump_table,
41818+ },
41819+ {
41820+ .name = "qxl_surf_mm",
41821+ .show = &qxl_mm_dump_table,
41822+ }
41823+ };
41824
41825- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41826- if (i == 0)
41827- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41828- else
41829- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41830- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41831- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41832- qxl_mem_types_list[i].driver_features = 0;
41833- if (i == 0)
41834- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41835- else
41836- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41837+ pax_open_kernel();
41838+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41839+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41840+ pax_close_kernel();
41841
41842- }
41843- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41844+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41845 #else
41846 return 0;
41847 #endif
41848diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41849index 2c45ac9..5d740f8 100644
41850--- a/drivers/gpu/drm/r128/r128_cce.c
41851+++ b/drivers/gpu/drm/r128/r128_cce.c
41852@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41853
41854 /* GH: Simple idle check.
41855 */
41856- atomic_set(&dev_priv->idle_count, 0);
41857+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41858
41859 /* We don't support anything other than bus-mastering ring mode,
41860 * but the ring can be in either AGP or PCI space for the ring
41861diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41862index 723e5d6..102dbaf 100644
41863--- a/drivers/gpu/drm/r128/r128_drv.h
41864+++ b/drivers/gpu/drm/r128/r128_drv.h
41865@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41866 int is_pci;
41867 unsigned long cce_buffers_offset;
41868
41869- atomic_t idle_count;
41870+ atomic_unchecked_t idle_count;
41871
41872 int page_flipping;
41873 int current_page;
41874 u32 crtc_offset;
41875 u32 crtc_offset_cntl;
41876
41877- atomic_t vbl_received;
41878+ atomic_unchecked_t vbl_received;
41879
41880 u32 color_fmt;
41881 unsigned int front_offset;
41882diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41883index 663f38c..ec159a1 100644
41884--- a/drivers/gpu/drm/r128/r128_ioc32.c
41885+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41886@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41887 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41888 }
41889
41890-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41891+drm_ioctl_compat_t r128_compat_ioctls[] = {
41892 [DRM_R128_INIT] = compat_r128_init,
41893 [DRM_R128_DEPTH] = compat_r128_depth,
41894 [DRM_R128_STIPPLE] = compat_r128_stipple,
41895@@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41896 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41897 {
41898 unsigned int nr = DRM_IOCTL_NR(cmd);
41899- drm_ioctl_compat_t *fn = NULL;
41900 int ret;
41901
41902 if (nr < DRM_COMMAND_BASE)
41903 return drm_compat_ioctl(filp, cmd, arg);
41904
41905- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41906- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41907-
41908- if (fn != NULL)
41909- ret = (*fn) (filp, cmd, arg);
41910+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE])
41911+ ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41912 else
41913 ret = drm_ioctl(filp, cmd, arg);
41914
41915diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41916index c2ae496..30b5993 100644
41917--- a/drivers/gpu/drm/r128/r128_irq.c
41918+++ b/drivers/gpu/drm/r128/r128_irq.c
41919@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41920 if (crtc != 0)
41921 return 0;
41922
41923- return atomic_read(&dev_priv->vbl_received);
41924+ return atomic_read_unchecked(&dev_priv->vbl_received);
41925 }
41926
41927 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41928@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41929 /* VBLANK interrupt */
41930 if (status & R128_CRTC_VBLANK_INT) {
41931 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41932- atomic_inc(&dev_priv->vbl_received);
41933+ atomic_inc_unchecked(&dev_priv->vbl_received);
41934 drm_handle_vblank(dev, 0);
41935 return IRQ_HANDLED;
41936 }
41937diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41938index 8fd2d9f..18c9660 100644
41939--- a/drivers/gpu/drm/r128/r128_state.c
41940+++ b/drivers/gpu/drm/r128/r128_state.c
41941@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41942
41943 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41944 {
41945- if (atomic_read(&dev_priv->idle_count) == 0)
41946+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41947 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41948 else
41949- atomic_set(&dev_priv->idle_count, 0);
41950+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41951 }
41952
41953 #endif
41954diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41955index b928c17..e5d9400 100644
41956--- a/drivers/gpu/drm/radeon/mkregtable.c
41957+++ b/drivers/gpu/drm/radeon/mkregtable.c
41958@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41959 regex_t mask_rex;
41960 regmatch_t match[4];
41961 char buf[1024];
41962- size_t end;
41963+ long end;
41964 int len;
41965 int done = 0;
41966 int r;
41967 unsigned o;
41968 struct offset *offset;
41969 char last_reg_s[10];
41970- int last_reg;
41971+ unsigned long last_reg;
41972
41973 if (regcomp
41974 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41975diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41976index bd7519f..e1c2cd95 100644
41977--- a/drivers/gpu/drm/radeon/radeon_device.c
41978+++ b/drivers/gpu/drm/radeon/radeon_device.c
41979@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41980 * locking inversion with the driver load path. And the access here is
41981 * completely racy anyway. So don't bother with locking for now.
41982 */
41983- return dev->open_count == 0;
41984+ return local_read(&dev->open_count) == 0;
41985 }
41986
41987 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41988diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41989index 46bd393..6ae4719 100644
41990--- a/drivers/gpu/drm/radeon/radeon_drv.h
41991+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41992@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41993
41994 /* SW interrupt */
41995 wait_queue_head_t swi_queue;
41996- atomic_t swi_emitted;
41997+ atomic_unchecked_t swi_emitted;
41998 int vblank_crtc;
41999 uint32_t irq_enable_reg;
42000 uint32_t r500_disp_irq_reg;
42001diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
42002index 0b98ea1..a3c770f 100644
42003--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
42004+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
42005@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42006 request = compat_alloc_user_space(sizeof(*request));
42007 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
42008 || __put_user(req32.param, &request->param)
42009- || __put_user((void __user *)(unsigned long)req32.value,
42010+ || __put_user((unsigned long)req32.value,
42011 &request->value))
42012 return -EFAULT;
42013
42014@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42015 #define compat_radeon_cp_setparam NULL
42016 #endif /* X86_64 || IA64 */
42017
42018-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42019+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
42020 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
42021 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
42022 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
42023@@ -393,17 +393,13 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42024 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42025 {
42026 unsigned int nr = DRM_IOCTL_NR(cmd);
42027- drm_ioctl_compat_t *fn = NULL;
42028 int ret;
42029
42030 if (nr < DRM_COMMAND_BASE)
42031 return drm_compat_ioctl(filp, cmd, arg);
42032
42033- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
42034- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42035-
42036- if (fn != NULL)
42037- ret = (*fn) (filp, cmd, arg);
42038+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE])
42039+ ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
42040 else
42041 ret = drm_ioctl(filp, cmd, arg);
42042
42043diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
42044index 244b19b..c19226d 100644
42045--- a/drivers/gpu/drm/radeon/radeon_irq.c
42046+++ b/drivers/gpu/drm/radeon/radeon_irq.c
42047@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42048 unsigned int ret;
42049 RING_LOCALS;
42050
42051- atomic_inc(&dev_priv->swi_emitted);
42052- ret = atomic_read(&dev_priv->swi_emitted);
42053+ atomic_inc_unchecked(&dev_priv->swi_emitted);
42054+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42055
42056 BEGIN_RING(4);
42057 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42058@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42059 drm_radeon_private_t *dev_priv =
42060 (drm_radeon_private_t *) dev->dev_private;
42061
42062- atomic_set(&dev_priv->swi_emitted, 0);
42063+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42064 init_waitqueue_head(&dev_priv->swi_queue);
42065
42066 dev->max_vblank_count = 0x001fffff;
42067diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42068index 15aee72..cda326e 100644
42069--- a/drivers/gpu/drm/radeon/radeon_state.c
42070+++ b/drivers/gpu/drm/radeon/radeon_state.c
42071@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
42072 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
42073 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
42074
42075- if (copy_from_user(&depth_boxes, clear->depth_boxes,
42076+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
42077 sarea_priv->nbox * sizeof(depth_boxes[0])))
42078 return -EFAULT;
42079
42080@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
42081 {
42082 drm_radeon_private_t *dev_priv = dev->dev_private;
42083 drm_radeon_getparam_t *param = data;
42084- int value;
42085+ int value = 0;
42086
42087 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
42088
42089diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
42090index edafd3c..3af7c9c 100644
42091--- a/drivers/gpu/drm/radeon/radeon_ttm.c
42092+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
42093@@ -961,7 +961,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
42094 man->size = size >> PAGE_SHIFT;
42095 }
42096
42097-static struct vm_operations_struct radeon_ttm_vm_ops;
42098+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
42099 static const struct vm_operations_struct *ttm_vm_ops = NULL;
42100
42101 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42102@@ -1002,8 +1002,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
42103 }
42104 if (unlikely(ttm_vm_ops == NULL)) {
42105 ttm_vm_ops = vma->vm_ops;
42106+ pax_open_kernel();
42107 radeon_ttm_vm_ops = *ttm_vm_ops;
42108 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
42109+ pax_close_kernel();
42110 }
42111 vma->vm_ops = &radeon_ttm_vm_ops;
42112 return 0;
42113diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
42114index 1a52522..8e78043 100644
42115--- a/drivers/gpu/drm/tegra/dc.c
42116+++ b/drivers/gpu/drm/tegra/dc.c
42117@@ -1585,7 +1585,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
42118 }
42119
42120 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
42121- dc->debugfs_files[i].data = dc;
42122+ *(void **)&dc->debugfs_files[i].data = dc;
42123
42124 err = drm_debugfs_create_files(dc->debugfs_files,
42125 ARRAY_SIZE(debugfs_files),
42126diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
42127index ed970f6..4eeea42 100644
42128--- a/drivers/gpu/drm/tegra/dsi.c
42129+++ b/drivers/gpu/drm/tegra/dsi.c
42130@@ -62,7 +62,7 @@ struct tegra_dsi {
42131 struct clk *clk_lp;
42132 struct clk *clk;
42133
42134- struct drm_info_list *debugfs_files;
42135+ drm_info_list_no_const *debugfs_files;
42136 struct drm_minor *minor;
42137 struct dentry *debugfs;
42138
42139diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
42140index 7eaaee74..cc2bc04 100644
42141--- a/drivers/gpu/drm/tegra/hdmi.c
42142+++ b/drivers/gpu/drm/tegra/hdmi.c
42143@@ -64,7 +64,7 @@ struct tegra_hdmi {
42144 bool stereo;
42145 bool dvi;
42146
42147- struct drm_info_list *debugfs_files;
42148+ drm_info_list_no_const *debugfs_files;
42149 struct drm_minor *minor;
42150 struct dentry *debugfs;
42151 };
42152diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42153index aa0bd054..aea6a01 100644
42154--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
42155+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42156@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
42157 }
42158
42159 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
42160- ttm_bo_man_init,
42161- ttm_bo_man_takedown,
42162- ttm_bo_man_get_node,
42163- ttm_bo_man_put_node,
42164- ttm_bo_man_debug
42165+ .init = ttm_bo_man_init,
42166+ .takedown = ttm_bo_man_takedown,
42167+ .get_node = ttm_bo_man_get_node,
42168+ .put_node = ttm_bo_man_put_node,
42169+ .debug = ttm_bo_man_debug
42170 };
42171 EXPORT_SYMBOL(ttm_bo_manager_func);
42172diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
42173index a1803fb..c53f6b0 100644
42174--- a/drivers/gpu/drm/ttm/ttm_memory.c
42175+++ b/drivers/gpu/drm/ttm/ttm_memory.c
42176@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
42177 zone->glob = glob;
42178 glob->zone_kernel = zone;
42179 ret = kobject_init_and_add(
42180- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42181+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42182 if (unlikely(ret != 0)) {
42183 kobject_put(&zone->kobj);
42184 return ret;
42185@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
42186 zone->glob = glob;
42187 glob->zone_dma32 = zone;
42188 ret = kobject_init_and_add(
42189- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42190+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42191 if (unlikely(ret != 0)) {
42192 kobject_put(&zone->kobj);
42193 return ret;
42194diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
42195index 025c429..314062f 100644
42196--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
42197+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
42198@@ -54,7 +54,7 @@
42199
42200 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
42201 #define SMALL_ALLOCATION 16
42202-#define FREE_ALL_PAGES (~0U)
42203+#define FREE_ALL_PAGES (~0UL)
42204 /* times are in msecs */
42205 #define PAGE_FREE_INTERVAL 1000
42206
42207@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
42208 * @free_all: If set to true will free all pages in pool
42209 * @use_static: Safe to use static buffer
42210 **/
42211-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
42212+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
42213 bool use_static)
42214 {
42215 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
42216 unsigned long irq_flags;
42217 struct page *p;
42218 struct page **pages_to_free;
42219- unsigned freed_pages = 0,
42220- npages_to_free = nr_free;
42221+ unsigned long freed_pages = 0, npages_to_free = nr_free;
42222
42223 if (NUM_PAGES_TO_ALLOC < nr_free)
42224 npages_to_free = NUM_PAGES_TO_ALLOC;
42225@@ -371,7 +370,8 @@ restart:
42226 __list_del(&p->lru, &pool->list);
42227
42228 ttm_pool_update_free_locked(pool, freed_pages);
42229- nr_free -= freed_pages;
42230+ if (likely(nr_free != FREE_ALL_PAGES))
42231+ nr_free -= freed_pages;
42232 }
42233
42234 spin_unlock_irqrestore(&pool->lock, irq_flags);
42235@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42236 unsigned i;
42237 unsigned pool_offset;
42238 struct ttm_page_pool *pool;
42239- int shrink_pages = sc->nr_to_scan;
42240+ unsigned long shrink_pages = sc->nr_to_scan;
42241 unsigned long freed = 0;
42242
42243 if (!mutex_trylock(&lock))
42244@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42245 pool_offset = ++start_pool % NUM_POOLS;
42246 /* select start pool in round robin fashion */
42247 for (i = 0; i < NUM_POOLS; ++i) {
42248- unsigned nr_free = shrink_pages;
42249+ unsigned long nr_free = shrink_pages;
42250 if (shrink_pages == 0)
42251 break;
42252 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
42253@@ -673,7 +673,7 @@ out:
42254 }
42255
42256 /* Put all pages in pages list to correct pool to wait for reuse */
42257-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
42258+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
42259 enum ttm_caching_state cstate)
42260 {
42261 unsigned long irq_flags;
42262@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
42263 struct list_head plist;
42264 struct page *p = NULL;
42265 gfp_t gfp_flags = GFP_USER;
42266- unsigned count;
42267+ unsigned long count;
42268 int r;
42269
42270 /* set zero flag for page allocation if required */
42271diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42272index 01e1d27..aaa018a 100644
42273--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42274+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42275@@ -56,7 +56,7 @@
42276
42277 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
42278 #define SMALL_ALLOCATION 4
42279-#define FREE_ALL_PAGES (~0U)
42280+#define FREE_ALL_PAGES (~0UL)
42281 /* times are in msecs */
42282 #define IS_UNDEFINED (0)
42283 #define IS_WC (1<<1)
42284@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
42285 * @nr_free: If set to true will free all pages in pool
42286 * @use_static: Safe to use static buffer
42287 **/
42288-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42289+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
42290 bool use_static)
42291 {
42292 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
42293@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42294 struct dma_page *dma_p, *tmp;
42295 struct page **pages_to_free;
42296 struct list_head d_pages;
42297- unsigned freed_pages = 0,
42298- npages_to_free = nr_free;
42299+ unsigned long freed_pages = 0, npages_to_free = nr_free;
42300
42301 if (NUM_PAGES_TO_ALLOC < nr_free)
42302 npages_to_free = NUM_PAGES_TO_ALLOC;
42303@@ -499,7 +498,8 @@ restart:
42304 /* remove range of pages from the pool */
42305 if (freed_pages) {
42306 ttm_pool_update_free_locked(pool, freed_pages);
42307- nr_free -= freed_pages;
42308+ if (likely(nr_free != FREE_ALL_PAGES))
42309+ nr_free -= freed_pages;
42310 }
42311
42312 spin_unlock_irqrestore(&pool->lock, irq_flags);
42313@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
42314 struct dma_page *d_page, *next;
42315 enum pool_type type;
42316 bool is_cached = false;
42317- unsigned count = 0, i, npages = 0;
42318+ unsigned long count = 0, i, npages = 0;
42319 unsigned long irq_flags;
42320
42321 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
42322@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42323 static unsigned start_pool;
42324 unsigned idx = 0;
42325 unsigned pool_offset;
42326- unsigned shrink_pages = sc->nr_to_scan;
42327+ unsigned long shrink_pages = sc->nr_to_scan;
42328 struct device_pools *p;
42329 unsigned long freed = 0;
42330
42331@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42332 goto out;
42333 pool_offset = ++start_pool % _manager->npools;
42334 list_for_each_entry(p, &_manager->pools, pools) {
42335- unsigned nr_free;
42336+ unsigned long nr_free;
42337
42338 if (!p->dev)
42339 continue;
42340@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42341 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
42342 freed += nr_free - shrink_pages;
42343
42344- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
42345+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
42346 p->pool->dev_name, p->pool->name, current->pid,
42347 nr_free, shrink_pages);
42348 }
42349diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42350index 5fc16ce..1bd84ec 100644
42351--- a/drivers/gpu/drm/udl/udl_fb.c
42352+++ b/drivers/gpu/drm/udl/udl_fb.c
42353@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42354 fb_deferred_io_cleanup(info);
42355 kfree(info->fbdefio);
42356 info->fbdefio = NULL;
42357- info->fbops->fb_mmap = udl_fb_mmap;
42358 }
42359
42360 pr_warn("released /dev/fb%d user=%d count=%d\n",
42361diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42362index ef8c500..01030c8 100644
42363--- a/drivers/gpu/drm/via/via_drv.h
42364+++ b/drivers/gpu/drm/via/via_drv.h
42365@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
42366 typedef uint32_t maskarray_t[5];
42367
42368 typedef struct drm_via_irq {
42369- atomic_t irq_received;
42370+ atomic_unchecked_t irq_received;
42371 uint32_t pending_mask;
42372 uint32_t enable_mask;
42373 wait_queue_head_t irq_queue;
42374@@ -77,7 +77,7 @@ typedef struct drm_via_private {
42375 struct timeval last_vblank;
42376 int last_vblank_valid;
42377 unsigned usec_per_vblank;
42378- atomic_t vbl_received;
42379+ atomic_unchecked_t vbl_received;
42380 drm_via_state_t hc_state;
42381 char pci_buf[VIA_PCI_BUF_SIZE];
42382 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
42383diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
42384index 1319433..a993b0c 100644
42385--- a/drivers/gpu/drm/via/via_irq.c
42386+++ b/drivers/gpu/drm/via/via_irq.c
42387@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
42388 if (crtc != 0)
42389 return 0;
42390
42391- return atomic_read(&dev_priv->vbl_received);
42392+ return atomic_read_unchecked(&dev_priv->vbl_received);
42393 }
42394
42395 irqreturn_t via_driver_irq_handler(int irq, void *arg)
42396@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42397
42398 status = VIA_READ(VIA_REG_INTERRUPT);
42399 if (status & VIA_IRQ_VBLANK_PENDING) {
42400- atomic_inc(&dev_priv->vbl_received);
42401- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
42402+ atomic_inc_unchecked(&dev_priv->vbl_received);
42403+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
42404 do_gettimeofday(&cur_vblank);
42405 if (dev_priv->last_vblank_valid) {
42406 dev_priv->usec_per_vblank =
42407@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42408 dev_priv->last_vblank = cur_vblank;
42409 dev_priv->last_vblank_valid = 1;
42410 }
42411- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
42412+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
42413 DRM_DEBUG("US per vblank is: %u\n",
42414 dev_priv->usec_per_vblank);
42415 }
42416@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42417
42418 for (i = 0; i < dev_priv->num_irqs; ++i) {
42419 if (status & cur_irq->pending_mask) {
42420- atomic_inc(&cur_irq->irq_received);
42421+ atomic_inc_unchecked(&cur_irq->irq_received);
42422 wake_up(&cur_irq->irq_queue);
42423 handled = 1;
42424 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
42425@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
42426 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42427 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
42428 masks[irq][4]));
42429- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
42430+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
42431 } else {
42432 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42433 (((cur_irq_sequence =
42434- atomic_read(&cur_irq->irq_received)) -
42435+ atomic_read_unchecked(&cur_irq->irq_received)) -
42436 *sequence) <= (1 << 23)));
42437 }
42438 *sequence = cur_irq_sequence;
42439@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
42440 }
42441
42442 for (i = 0; i < dev_priv->num_irqs; ++i) {
42443- atomic_set(&cur_irq->irq_received, 0);
42444+ atomic_set_unchecked(&cur_irq->irq_received, 0);
42445 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
42446 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
42447 init_waitqueue_head(&cur_irq->irq_queue);
42448@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
42449 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
42450 case VIA_IRQ_RELATIVE:
42451 irqwait->request.sequence +=
42452- atomic_read(&cur_irq->irq_received);
42453+ atomic_read_unchecked(&cur_irq->irq_received);
42454 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
42455 case VIA_IRQ_ABSOLUTE:
42456 break;
42457diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42458index d26a6da..5fa41ed 100644
42459--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42460+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42461@@ -447,7 +447,7 @@ struct vmw_private {
42462 * Fencing and IRQs.
42463 */
42464
42465- atomic_t marker_seq;
42466+ atomic_unchecked_t marker_seq;
42467 wait_queue_head_t fence_queue;
42468 wait_queue_head_t fifo_queue;
42469 spinlock_t waiter_lock;
42470diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42471index 39f2b03..d1b0a64 100644
42472--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42473+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42474@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
42475 (unsigned int) min,
42476 (unsigned int) fifo->capabilities);
42477
42478- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42479+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42480 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
42481 vmw_marker_queue_init(&fifo->marker_queue);
42482 return vmw_fifo_send_fence(dev_priv, &dummy);
42483@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
42484 if (reserveable)
42485 iowrite32(bytes, fifo_mem +
42486 SVGA_FIFO_RESERVED);
42487- return fifo_mem + (next_cmd >> 2);
42488+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
42489 } else {
42490 need_bounce = true;
42491 }
42492@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42493
42494 fm = vmw_fifo_reserve(dev_priv, bytes);
42495 if (unlikely(fm == NULL)) {
42496- *seqno = atomic_read(&dev_priv->marker_seq);
42497+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42498 ret = -ENOMEM;
42499 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
42500 false, 3*HZ);
42501@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42502 }
42503
42504 do {
42505- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
42506+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
42507 } while (*seqno == 0);
42508
42509 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
42510diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42511index 170b61b..fec7348 100644
42512--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42513+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42514@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
42515 }
42516
42517 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
42518- vmw_gmrid_man_init,
42519- vmw_gmrid_man_takedown,
42520- vmw_gmrid_man_get_node,
42521- vmw_gmrid_man_put_node,
42522- vmw_gmrid_man_debug
42523+ .init = vmw_gmrid_man_init,
42524+ .takedown = vmw_gmrid_man_takedown,
42525+ .get_node = vmw_gmrid_man_get_node,
42526+ .put_node = vmw_gmrid_man_put_node,
42527+ .debug = vmw_gmrid_man_debug
42528 };
42529diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42530index 69c8ce2..cacb0ab 100644
42531--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42532+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42533@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
42534 int ret;
42535
42536 num_clips = arg->num_clips;
42537- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42538+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42539
42540 if (unlikely(num_clips == 0))
42541 return 0;
42542@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
42543 int ret;
42544
42545 num_clips = arg->num_clips;
42546- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42547+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42548
42549 if (unlikely(num_clips == 0))
42550 return 0;
42551diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42552index 9fe9827..0aa2fc0 100644
42553--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42554+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42555@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
42556 * emitted. Then the fence is stale and signaled.
42557 */
42558
42559- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
42560+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
42561 > VMW_FENCE_WRAP);
42562
42563 return ret;
42564@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42565
42566 if (fifo_idle)
42567 down_read(&fifo_state->rwsem);
42568- signal_seq = atomic_read(&dev_priv->marker_seq);
42569+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42570 ret = 0;
42571
42572 for (;;) {
42573diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42574index efd1ffd..0ae13ca 100644
42575--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42576+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42577@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42578 while (!vmw_lag_lt(queue, us)) {
42579 spin_lock(&queue->lock);
42580 if (list_empty(&queue->head))
42581- seqno = atomic_read(&dev_priv->marker_seq);
42582+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42583 else {
42584 marker = list_first_entry(&queue->head,
42585 struct vmw_marker, head);
42586diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42587index 37ac7b5..d52a5c9 100644
42588--- a/drivers/gpu/vga/vga_switcheroo.c
42589+++ b/drivers/gpu/vga/vga_switcheroo.c
42590@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42591
42592 /* this version is for the case where the power switch is separate
42593 to the device being powered down. */
42594-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42595+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42596 {
42597 /* copy over all the bus versions */
42598 if (dev->bus && dev->bus->pm) {
42599@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42600 return ret;
42601 }
42602
42603-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42604+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42605 {
42606 /* copy over all the bus versions */
42607 if (dev->bus && dev->bus->pm) {
42608diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42609index 56ce8c2..32ce524 100644
42610--- a/drivers/hid/hid-core.c
42611+++ b/drivers/hid/hid-core.c
42612@@ -2531,7 +2531,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42613
42614 int hid_add_device(struct hid_device *hdev)
42615 {
42616- static atomic_t id = ATOMIC_INIT(0);
42617+ static atomic_unchecked_t id = ATOMIC_INIT(0);
42618 int ret;
42619
42620 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42621@@ -2574,7 +2574,7 @@ int hid_add_device(struct hid_device *hdev)
42622 /* XXX hack, any other cleaner solution after the driver core
42623 * is converted to allow more than 20 bytes as the device name? */
42624 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42625- hdev->vendor, hdev->product, atomic_inc_return(&id));
42626+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42627
42628 hid_debug_register(hdev, dev_name(&hdev->dev));
42629 ret = device_add(&hdev->dev);
42630diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42631index c13fb5b..55a3802 100644
42632--- a/drivers/hid/hid-wiimote-debug.c
42633+++ b/drivers/hid/hid-wiimote-debug.c
42634@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42635 else if (size == 0)
42636 return -EIO;
42637
42638- if (copy_to_user(u, buf, size))
42639+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
42640 return -EFAULT;
42641
42642 *off += size;
42643diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42644index 00bc30e..d8e5097 100644
42645--- a/drivers/hv/channel.c
42646+++ b/drivers/hv/channel.c
42647@@ -370,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42648 int ret = 0;
42649
42650 next_gpadl_handle =
42651- (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
42652+ (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1);
42653
42654 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42655 if (ret)
42656diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42657index 50e51a5..b0bfd78 100644
42658--- a/drivers/hv/hv.c
42659+++ b/drivers/hv/hv.c
42660@@ -118,7 +118,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42661 u64 output_address = (output) ? virt_to_phys(output) : 0;
42662 u32 output_address_hi = output_address >> 32;
42663 u32 output_address_lo = output_address & 0xFFFFFFFF;
42664- void *hypercall_page = hv_context.hypercall_page;
42665+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42666
42667 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42668 "=a"(hv_status_lo) : "d" (control_hi),
42669@@ -164,7 +164,7 @@ int hv_init(void)
42670 /* See if the hypercall page is already set */
42671 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42672
42673- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42674+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42675
42676 if (!virtaddr)
42677 goto cleanup;
42678diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42679index ff16938..e60879c 100644
42680--- a/drivers/hv/hv_balloon.c
42681+++ b/drivers/hv/hv_balloon.c
42682@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42683
42684 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42685 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42686-static atomic_t trans_id = ATOMIC_INIT(0);
42687+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42688
42689 static int dm_ring_size = (5 * PAGE_SIZE);
42690
42691@@ -947,7 +947,7 @@ static void hot_add_req(struct work_struct *dummy)
42692 pr_info("Memory hot add failed\n");
42693
42694 dm->state = DM_INITIALIZED;
42695- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42696+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42697 vmbus_sendpacket(dm->dev->channel, &resp,
42698 sizeof(struct dm_hot_add_response),
42699 (unsigned long)NULL,
42700@@ -1028,7 +1028,7 @@ static void post_status(struct hv_dynmem_device *dm)
42701 memset(&status, 0, sizeof(struct dm_status));
42702 status.hdr.type = DM_STATUS_REPORT;
42703 status.hdr.size = sizeof(struct dm_status);
42704- status.hdr.trans_id = atomic_inc_return(&trans_id);
42705+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42706
42707 /*
42708 * The host expects the guest to report free memory.
42709@@ -1048,7 +1048,7 @@ static void post_status(struct hv_dynmem_device *dm)
42710 * send the status. This can happen if we were interrupted
42711 * after we picked our transaction ID.
42712 */
42713- if (status.hdr.trans_id != atomic_read(&trans_id))
42714+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42715 return;
42716
42717 /*
42718@@ -1188,7 +1188,7 @@ static void balloon_up(struct work_struct *dummy)
42719 */
42720
42721 do {
42722- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42723+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42724 ret = vmbus_sendpacket(dm_device.dev->channel,
42725 bl_resp,
42726 bl_resp->hdr.size,
42727@@ -1234,7 +1234,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42728
42729 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42730 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42731- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42732+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42733 resp.hdr.size = sizeof(struct dm_unballoon_response);
42734
42735 vmbus_sendpacket(dm_device.dev->channel, &resp,
42736@@ -1295,7 +1295,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42737 memset(&version_req, 0, sizeof(struct dm_version_request));
42738 version_req.hdr.type = DM_VERSION_REQUEST;
42739 version_req.hdr.size = sizeof(struct dm_version_request);
42740- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42741+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42742 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42743 version_req.is_last_attempt = 1;
42744
42745@@ -1468,7 +1468,7 @@ static int balloon_probe(struct hv_device *dev,
42746 memset(&version_req, 0, sizeof(struct dm_version_request));
42747 version_req.hdr.type = DM_VERSION_REQUEST;
42748 version_req.hdr.size = sizeof(struct dm_version_request);
42749- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42750+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42751 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42752 version_req.is_last_attempt = 0;
42753
42754@@ -1499,7 +1499,7 @@ static int balloon_probe(struct hv_device *dev,
42755 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42756 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42757 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42758- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42759+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42760
42761 cap_msg.caps.cap_bits.balloon = 1;
42762 cap_msg.caps.cap_bits.hot_add = 1;
42763diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42764index 44b1c94..6dccc2c 100644
42765--- a/drivers/hv/hyperv_vmbus.h
42766+++ b/drivers/hv/hyperv_vmbus.h
42767@@ -632,7 +632,7 @@ enum vmbus_connect_state {
42768 struct vmbus_connection {
42769 enum vmbus_connect_state conn_state;
42770
42771- atomic_t next_gpadl_handle;
42772+ atomic_unchecked_t next_gpadl_handle;
42773
42774 /*
42775 * Represents channel interrupts. Each bit position represents a
42776diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42777index f518b8d7..4bc0b64 100644
42778--- a/drivers/hv/vmbus_drv.c
42779+++ b/drivers/hv/vmbus_drv.c
42780@@ -840,10 +840,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42781 {
42782 int ret = 0;
42783
42784- static atomic_t device_num = ATOMIC_INIT(0);
42785+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42786
42787 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42788- atomic_inc_return(&device_num));
42789+ atomic_inc_return_unchecked(&device_num));
42790
42791 child_device_obj->device.bus = &hv_bus;
42792 child_device_obj->device.parent = &hv_acpi_dev->dev;
42793diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42794index 579bdf9..0dac21d5 100644
42795--- a/drivers/hwmon/acpi_power_meter.c
42796+++ b/drivers/hwmon/acpi_power_meter.c
42797@@ -116,7 +116,7 @@ struct sensor_template {
42798 struct device_attribute *devattr,
42799 const char *buf, size_t count);
42800 int index;
42801-};
42802+} __do_const;
42803
42804 /* Averaging interval */
42805 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42806@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42807 struct sensor_template *attrs)
42808 {
42809 struct device *dev = &resource->acpi_dev->dev;
42810- struct sensor_device_attribute *sensors =
42811+ sensor_device_attribute_no_const *sensors =
42812 &resource->sensors[resource->num_sensors];
42813 int res = 0;
42814
42815@@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
42816 return 0;
42817 }
42818
42819-static struct dmi_system_id __initdata pm_dmi_table[] = {
42820+static const struct dmi_system_id __initconst pm_dmi_table[] = {
42821 {
42822 enable_cap_knobs, "IBM Active Energy Manager",
42823 {
42824diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42825index 0af63da..05a183a 100644
42826--- a/drivers/hwmon/applesmc.c
42827+++ b/drivers/hwmon/applesmc.c
42828@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42829 {
42830 struct applesmc_node_group *grp;
42831 struct applesmc_dev_attr *node;
42832- struct attribute *attr;
42833+ attribute_no_const *attr;
42834 int ret, i;
42835
42836 for (grp = groups; grp->format; grp++) {
42837diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42838index cccef87..06ce8ec 100644
42839--- a/drivers/hwmon/asus_atk0110.c
42840+++ b/drivers/hwmon/asus_atk0110.c
42841@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42842 struct atk_sensor_data {
42843 struct list_head list;
42844 struct atk_data *data;
42845- struct device_attribute label_attr;
42846- struct device_attribute input_attr;
42847- struct device_attribute limit1_attr;
42848- struct device_attribute limit2_attr;
42849+ device_attribute_no_const label_attr;
42850+ device_attribute_no_const input_attr;
42851+ device_attribute_no_const limit1_attr;
42852+ device_attribute_no_const limit2_attr;
42853 char label_attr_name[ATTR_NAME_SIZE];
42854 char input_attr_name[ATTR_NAME_SIZE];
42855 char limit1_attr_name[ATTR_NAME_SIZE];
42856@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42857 static struct device_attribute atk_name_attr =
42858 __ATTR(name, 0444, atk_name_show, NULL);
42859
42860-static void atk_init_attribute(struct device_attribute *attr, char *name,
42861+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42862 sysfs_show_func show)
42863 {
42864 sysfs_attr_init(&attr->attr);
42865diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42866index 5b7fec8..05c957a 100644
42867--- a/drivers/hwmon/coretemp.c
42868+++ b/drivers/hwmon/coretemp.c
42869@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42870 return NOTIFY_OK;
42871 }
42872
42873-static struct notifier_block coretemp_cpu_notifier __refdata = {
42874+static struct notifier_block coretemp_cpu_notifier = {
42875 .notifier_call = coretemp_cpu_callback,
42876 };
42877
42878diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42879index 7a8a6fb..015c1fd 100644
42880--- a/drivers/hwmon/ibmaem.c
42881+++ b/drivers/hwmon/ibmaem.c
42882@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42883 struct aem_rw_sensor_template *rw)
42884 {
42885 struct device *dev = &data->pdev->dev;
42886- struct sensor_device_attribute *sensors = data->sensors;
42887+ sensor_device_attribute_no_const *sensors = data->sensors;
42888 int err;
42889
42890 /* Set up read-only sensors */
42891diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42892index 17ae2eb..21b71dd 100644
42893--- a/drivers/hwmon/iio_hwmon.c
42894+++ b/drivers/hwmon/iio_hwmon.c
42895@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42896 {
42897 struct device *dev = &pdev->dev;
42898 struct iio_hwmon_state *st;
42899- struct sensor_device_attribute *a;
42900+ sensor_device_attribute_no_const *a;
42901 int ret, i;
42902 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42903 enum iio_chan_type type;
42904diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42905index f3830db..9f4d6d5 100644
42906--- a/drivers/hwmon/nct6683.c
42907+++ b/drivers/hwmon/nct6683.c
42908@@ -397,11 +397,11 @@ static struct attribute_group *
42909 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42910 int repeat)
42911 {
42912- struct sensor_device_attribute_2 *a2;
42913- struct sensor_device_attribute *a;
42914+ sensor_device_attribute_2_no_const *a2;
42915+ sensor_device_attribute_no_const *a;
42916 struct sensor_device_template **t;
42917 struct sensor_device_attr_u *su;
42918- struct attribute_group *group;
42919+ attribute_group_no_const *group;
42920 struct attribute **attrs;
42921 int i, j, count;
42922
42923diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42924index 1be4117..88ae1e1 100644
42925--- a/drivers/hwmon/nct6775.c
42926+++ b/drivers/hwmon/nct6775.c
42927@@ -952,10 +952,10 @@ static struct attribute_group *
42928 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42929 int repeat)
42930 {
42931- struct attribute_group *group;
42932+ attribute_group_no_const *group;
42933 struct sensor_device_attr_u *su;
42934- struct sensor_device_attribute *a;
42935- struct sensor_device_attribute_2 *a2;
42936+ sensor_device_attribute_no_const *a;
42937+ sensor_device_attribute_2_no_const *a2;
42938 struct attribute **attrs;
42939 struct sensor_device_template **t;
42940 int i, count;
42941diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42942index f2e47c7..45d7941 100644
42943--- a/drivers/hwmon/pmbus/pmbus_core.c
42944+++ b/drivers/hwmon/pmbus/pmbus_core.c
42945@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42946 return 0;
42947 }
42948
42949-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42950+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42951 const char *name,
42952 umode_t mode,
42953 ssize_t (*show)(struct device *dev,
42954@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42955 dev_attr->store = store;
42956 }
42957
42958-static void pmbus_attr_init(struct sensor_device_attribute *a,
42959+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42960 const char *name,
42961 umode_t mode,
42962 ssize_t (*show)(struct device *dev,
42963@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42964 u16 reg, u8 mask)
42965 {
42966 struct pmbus_boolean *boolean;
42967- struct sensor_device_attribute *a;
42968+ sensor_device_attribute_no_const *a;
42969
42970 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42971 if (!boolean)
42972@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42973 bool update, bool readonly)
42974 {
42975 struct pmbus_sensor *sensor;
42976- struct device_attribute *a;
42977+ device_attribute_no_const *a;
42978
42979 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42980 if (!sensor)
42981@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42982 const char *lstring, int index)
42983 {
42984 struct pmbus_label *label;
42985- struct device_attribute *a;
42986+ device_attribute_no_const *a;
42987
42988 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42989 if (!label)
42990diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42991index d4f0935..7420593 100644
42992--- a/drivers/hwmon/sht15.c
42993+++ b/drivers/hwmon/sht15.c
42994@@ -169,7 +169,7 @@ struct sht15_data {
42995 int supply_uv;
42996 bool supply_uv_valid;
42997 struct work_struct update_supply_work;
42998- atomic_t interrupt_handled;
42999+ atomic_unchecked_t interrupt_handled;
43000 };
43001
43002 /**
43003@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
43004 ret = gpio_direction_input(data->pdata->gpio_data);
43005 if (ret)
43006 return ret;
43007- atomic_set(&data->interrupt_handled, 0);
43008+ atomic_set_unchecked(&data->interrupt_handled, 0);
43009
43010 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43011 if (gpio_get_value(data->pdata->gpio_data) == 0) {
43012 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
43013 /* Only relevant if the interrupt hasn't occurred. */
43014- if (!atomic_read(&data->interrupt_handled))
43015+ if (!atomic_read_unchecked(&data->interrupt_handled))
43016 schedule_work(&data->read_work);
43017 }
43018 ret = wait_event_timeout(data->wait_queue,
43019@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
43020
43021 /* First disable the interrupt */
43022 disable_irq_nosync(irq);
43023- atomic_inc(&data->interrupt_handled);
43024+ atomic_inc_unchecked(&data->interrupt_handled);
43025 /* Then schedule a reading work struct */
43026 if (data->state != SHT15_READING_NOTHING)
43027 schedule_work(&data->read_work);
43028@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
43029 * If not, then start the interrupt again - care here as could
43030 * have gone low in meantime so verify it hasn't!
43031 */
43032- atomic_set(&data->interrupt_handled, 0);
43033+ atomic_set_unchecked(&data->interrupt_handled, 0);
43034 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43035 /* If still not occurred or another handler was scheduled */
43036 if (gpio_get_value(data->pdata->gpio_data)
43037- || atomic_read(&data->interrupt_handled))
43038+ || atomic_read_unchecked(&data->interrupt_handled))
43039 return;
43040 }
43041
43042diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
43043index ac91c07..8e69663 100644
43044--- a/drivers/hwmon/via-cputemp.c
43045+++ b/drivers/hwmon/via-cputemp.c
43046@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
43047 return NOTIFY_OK;
43048 }
43049
43050-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
43051+static struct notifier_block via_cputemp_cpu_notifier = {
43052 .notifier_call = via_cputemp_cpu_callback,
43053 };
43054
43055diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
43056index 65e3240..e6c511d 100644
43057--- a/drivers/i2c/busses/i2c-amd756-s4882.c
43058+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
43059@@ -39,7 +39,7 @@
43060 extern struct i2c_adapter amd756_smbus;
43061
43062 static struct i2c_adapter *s4882_adapter;
43063-static struct i2c_algorithm *s4882_algo;
43064+static i2c_algorithm_no_const *s4882_algo;
43065
43066 /* Wrapper access functions for multiplexed SMBus */
43067 static DEFINE_MUTEX(amd756_lock);
43068diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
43069index b19a310..d6eece0 100644
43070--- a/drivers/i2c/busses/i2c-diolan-u2c.c
43071+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
43072@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
43073 /* usb layer */
43074
43075 /* Send command to device, and get response. */
43076-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43077+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43078 {
43079 int ret = 0;
43080 int actual;
43081diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
43082index 88eda09..cf40434 100644
43083--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
43084+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
43085@@ -37,7 +37,7 @@
43086 extern struct i2c_adapter *nforce2_smbus;
43087
43088 static struct i2c_adapter *s4985_adapter;
43089-static struct i2c_algorithm *s4985_algo;
43090+static i2c_algorithm_no_const *s4985_algo;
43091
43092 /* Wrapper access functions for multiplexed SMBus */
43093 static DEFINE_MUTEX(nforce2_lock);
43094diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43095index 71c7a39..71dd3e0 100644
43096--- a/drivers/i2c/i2c-dev.c
43097+++ b/drivers/i2c/i2c-dev.c
43098@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43099 break;
43100 }
43101
43102- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43103+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43104 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43105 if (IS_ERR(rdwr_pa[i].buf)) {
43106 res = PTR_ERR(rdwr_pa[i].buf);
43107diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43108index 0b510ba..4fbb5085 100644
43109--- a/drivers/ide/ide-cd.c
43110+++ b/drivers/ide/ide-cd.c
43111@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43112 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43113 if ((unsigned long)buf & alignment
43114 || blk_rq_bytes(rq) & q->dma_pad_mask
43115- || object_is_on_stack(buf))
43116+ || object_starts_on_stack(buf))
43117 drive->dma = 0;
43118 }
43119 }
43120diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43121index 4df97f6..c751151 100644
43122--- a/drivers/iio/industrialio-core.c
43123+++ b/drivers/iio/industrialio-core.c
43124@@ -570,7 +570,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43125 }
43126
43127 static
43128-int __iio_device_attr_init(struct device_attribute *dev_attr,
43129+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43130 const char *postfix,
43131 struct iio_chan_spec const *chan,
43132 ssize_t (*readfunc)(struct device *dev,
43133diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43134index e28a494..f7c2671 100644
43135--- a/drivers/infiniband/core/cm.c
43136+++ b/drivers/infiniband/core/cm.c
43137@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43138
43139 struct cm_counter_group {
43140 struct kobject obj;
43141- atomic_long_t counter[CM_ATTR_COUNT];
43142+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43143 };
43144
43145 struct cm_counter_attribute {
43146@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43147 struct ib_mad_send_buf *msg = NULL;
43148 int ret;
43149
43150- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43151+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43152 counter[CM_REQ_COUNTER]);
43153
43154 /* Quick state check to discard duplicate REQs. */
43155@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
43156 if (!cm_id_priv)
43157 return;
43158
43159- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43160+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43161 counter[CM_REP_COUNTER]);
43162 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
43163 if (ret)
43164@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
43165 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
43166 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
43167 spin_unlock_irq(&cm_id_priv->lock);
43168- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43169+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43170 counter[CM_RTU_COUNTER]);
43171 goto out;
43172 }
43173@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
43174 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
43175 dreq_msg->local_comm_id);
43176 if (!cm_id_priv) {
43177- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43178+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43179 counter[CM_DREQ_COUNTER]);
43180 cm_issue_drep(work->port, work->mad_recv_wc);
43181 return -EINVAL;
43182@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
43183 case IB_CM_MRA_REP_RCVD:
43184 break;
43185 case IB_CM_TIMEWAIT:
43186- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43187+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43188 counter[CM_DREQ_COUNTER]);
43189 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43190 goto unlock;
43191@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
43192 cm_free_msg(msg);
43193 goto deref;
43194 case IB_CM_DREQ_RCVD:
43195- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43196+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43197 counter[CM_DREQ_COUNTER]);
43198 goto unlock;
43199 default:
43200@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
43201 ib_modify_mad(cm_id_priv->av.port->mad_agent,
43202 cm_id_priv->msg, timeout)) {
43203 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
43204- atomic_long_inc(&work->port->
43205+ atomic_long_inc_unchecked(&work->port->
43206 counter_group[CM_RECV_DUPLICATES].
43207 counter[CM_MRA_COUNTER]);
43208 goto out;
43209@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
43210 break;
43211 case IB_CM_MRA_REQ_RCVD:
43212 case IB_CM_MRA_REP_RCVD:
43213- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43214+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43215 counter[CM_MRA_COUNTER]);
43216 /* fall through */
43217 default:
43218@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
43219 case IB_CM_LAP_IDLE:
43220 break;
43221 case IB_CM_MRA_LAP_SENT:
43222- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43223+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43224 counter[CM_LAP_COUNTER]);
43225 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43226 goto unlock;
43227@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
43228 cm_free_msg(msg);
43229 goto deref;
43230 case IB_CM_LAP_RCVD:
43231- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43232+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43233 counter[CM_LAP_COUNTER]);
43234 goto unlock;
43235 default:
43236@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43237 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43238 if (cur_cm_id_priv) {
43239 spin_unlock_irq(&cm.lock);
43240- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43241+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43242 counter[CM_SIDR_REQ_COUNTER]);
43243 goto out; /* Duplicate message. */
43244 }
43245@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43246 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43247 msg->retries = 1;
43248
43249- atomic_long_add(1 + msg->retries,
43250+ atomic_long_add_unchecked(1 + msg->retries,
43251 &port->counter_group[CM_XMIT].counter[attr_index]);
43252 if (msg->retries)
43253- atomic_long_add(msg->retries,
43254+ atomic_long_add_unchecked(msg->retries,
43255 &port->counter_group[CM_XMIT_RETRIES].
43256 counter[attr_index]);
43257
43258@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43259 }
43260
43261 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43262- atomic_long_inc(&port->counter_group[CM_RECV].
43263+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43264 counter[attr_id - CM_ATTR_ID_OFFSET]);
43265
43266 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43267@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43268 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43269
43270 return sprintf(buf, "%ld\n",
43271- atomic_long_read(&group->counter[cm_attr->index]));
43272+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43273 }
43274
43275 static const struct sysfs_ops cm_counter_ops = {
43276diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43277index 9f5ad7c..588cd84 100644
43278--- a/drivers/infiniband/core/fmr_pool.c
43279+++ b/drivers/infiniband/core/fmr_pool.c
43280@@ -98,8 +98,8 @@ struct ib_fmr_pool {
43281
43282 struct task_struct *thread;
43283
43284- atomic_t req_ser;
43285- atomic_t flush_ser;
43286+ atomic_unchecked_t req_ser;
43287+ atomic_unchecked_t flush_ser;
43288
43289 wait_queue_head_t force_wait;
43290 };
43291@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43292 struct ib_fmr_pool *pool = pool_ptr;
43293
43294 do {
43295- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43296+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43297 ib_fmr_batch_release(pool);
43298
43299- atomic_inc(&pool->flush_ser);
43300+ atomic_inc_unchecked(&pool->flush_ser);
43301 wake_up_interruptible(&pool->force_wait);
43302
43303 if (pool->flush_function)
43304@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43305 }
43306
43307 set_current_state(TASK_INTERRUPTIBLE);
43308- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43309+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43310 !kthread_should_stop())
43311 schedule();
43312 __set_current_state(TASK_RUNNING);
43313@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43314 pool->dirty_watermark = params->dirty_watermark;
43315 pool->dirty_len = 0;
43316 spin_lock_init(&pool->pool_lock);
43317- atomic_set(&pool->req_ser, 0);
43318- atomic_set(&pool->flush_ser, 0);
43319+ atomic_set_unchecked(&pool->req_ser, 0);
43320+ atomic_set_unchecked(&pool->flush_ser, 0);
43321 init_waitqueue_head(&pool->force_wait);
43322
43323 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43324@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43325 }
43326 spin_unlock_irq(&pool->pool_lock);
43327
43328- serial = atomic_inc_return(&pool->req_ser);
43329+ serial = atomic_inc_return_unchecked(&pool->req_ser);
43330 wake_up_process(pool->thread);
43331
43332 if (wait_event_interruptible(pool->force_wait,
43333- atomic_read(&pool->flush_ser) - serial >= 0))
43334+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
43335 return -EINTR;
43336
43337 return 0;
43338@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
43339 } else {
43340 list_add_tail(&fmr->list, &pool->dirty_list);
43341 if (++pool->dirty_len >= pool->dirty_watermark) {
43342- atomic_inc(&pool->req_ser);
43343+ atomic_inc_unchecked(&pool->req_ser);
43344 wake_up_process(pool->thread);
43345 }
43346 }
43347diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
43348index a9f0489..27a161b 100644
43349--- a/drivers/infiniband/core/uverbs_cmd.c
43350+++ b/drivers/infiniband/core/uverbs_cmd.c
43351@@ -951,6 +951,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
43352 if (copy_from_user(&cmd, buf, sizeof cmd))
43353 return -EFAULT;
43354
43355+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
43356+ return -EFAULT;
43357+
43358 INIT_UDATA(&udata, buf + sizeof cmd,
43359 (unsigned long) cmd.response + sizeof resp,
43360 in_len - sizeof cmd, out_len - sizeof resp);
43361diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
43362index 6791fd1..78bdcdf 100644
43363--- a/drivers/infiniband/hw/cxgb4/mem.c
43364+++ b/drivers/infiniband/hw/cxgb4/mem.c
43365@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43366 int err;
43367 struct fw_ri_tpte tpt;
43368 u32 stag_idx;
43369- static atomic_t key;
43370+ static atomic_unchecked_t key;
43371
43372 if (c4iw_fatal_error(rdev))
43373 return -EIO;
43374@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43375 if (rdev->stats.stag.cur > rdev->stats.stag.max)
43376 rdev->stats.stag.max = rdev->stats.stag.cur;
43377 mutex_unlock(&rdev->stats.lock);
43378- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
43379+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
43380 }
43381 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
43382 __func__, stag_state, type, pdid, stag_idx);
43383diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
43384index 79b3dbc..96e5fcc 100644
43385--- a/drivers/infiniband/hw/ipath/ipath_rc.c
43386+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
43387@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43388 struct ib_atomic_eth *ateth;
43389 struct ipath_ack_entry *e;
43390 u64 vaddr;
43391- atomic64_t *maddr;
43392+ atomic64_unchecked_t *maddr;
43393 u64 sdata;
43394 u32 rkey;
43395 u8 next;
43396@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43397 IB_ACCESS_REMOTE_ATOMIC)))
43398 goto nack_acc_unlck;
43399 /* Perform atomic OP and save result. */
43400- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43401+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43402 sdata = be64_to_cpu(ateth->swap_data);
43403 e = &qp->s_ack_queue[qp->r_head_ack_queue];
43404 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
43405- (u64) atomic64_add_return(sdata, maddr) - sdata :
43406+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43407 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43408 be64_to_cpu(ateth->compare_data),
43409 sdata);
43410diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
43411index 1f95bba..9530f87 100644
43412--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
43413+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
43414@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
43415 unsigned long flags;
43416 struct ib_wc wc;
43417 u64 sdata;
43418- atomic64_t *maddr;
43419+ atomic64_unchecked_t *maddr;
43420 enum ib_wc_status send_status;
43421
43422 /*
43423@@ -382,11 +382,11 @@ again:
43424 IB_ACCESS_REMOTE_ATOMIC)))
43425 goto acc_err;
43426 /* Perform atomic OP and save result. */
43427- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43428+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43429 sdata = wqe->wr.wr.atomic.compare_add;
43430 *(u64 *) sqp->s_sge.sge.vaddr =
43431 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
43432- (u64) atomic64_add_return(sdata, maddr) - sdata :
43433+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43434 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43435 sdata, wqe->wr.wr.atomic.swap);
43436 goto send_comp;
43437diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
43438index 5904026..f1c30e5 100644
43439--- a/drivers/infiniband/hw/mlx4/mad.c
43440+++ b/drivers/infiniband/hw/mlx4/mad.c
43441@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
43442
43443 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
43444 {
43445- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
43446+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
43447 cpu_to_be64(0xff00000000000000LL);
43448 }
43449
43450diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
43451index ed327e6..ca1739e0 100644
43452--- a/drivers/infiniband/hw/mlx4/mcg.c
43453+++ b/drivers/infiniband/hw/mlx4/mcg.c
43454@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
43455 {
43456 char name[20];
43457
43458- atomic_set(&ctx->tid, 0);
43459+ atomic_set_unchecked(&ctx->tid, 0);
43460 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
43461 ctx->mcg_wq = create_singlethread_workqueue(name);
43462 if (!ctx->mcg_wq)
43463diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43464index f829fd9..1a8d436 100644
43465--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
43466+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43467@@ -439,7 +439,7 @@ struct mlx4_ib_demux_ctx {
43468 struct list_head mcg_mgid0_list;
43469 struct workqueue_struct *mcg_wq;
43470 struct mlx4_ib_demux_pv_ctx **tun;
43471- atomic_t tid;
43472+ atomic_unchecked_t tid;
43473 int flushing; /* flushing the work queue */
43474 };
43475
43476diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
43477index 9d3e5c1..6f166df 100644
43478--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
43479+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
43480@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
43481 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
43482 }
43483
43484-int mthca_QUERY_FW(struct mthca_dev *dev)
43485+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
43486 {
43487 struct mthca_mailbox *mailbox;
43488 u32 *outbox;
43489@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43490 CMD_TIME_CLASS_B);
43491 }
43492
43493-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43494+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43495 int num_mtt)
43496 {
43497 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
43498@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
43499 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
43500 }
43501
43502-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43503+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43504 int eq_num)
43505 {
43506 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
43507@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
43508 CMD_TIME_CLASS_B);
43509 }
43510
43511-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43512+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43513 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
43514 void *in_mad, void *response_mad)
43515 {
43516diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
43517index ded76c1..0cf0a08 100644
43518--- a/drivers/infiniband/hw/mthca/mthca_main.c
43519+++ b/drivers/infiniband/hw/mthca/mthca_main.c
43520@@ -692,7 +692,7 @@ err_close:
43521 return err;
43522 }
43523
43524-static int mthca_setup_hca(struct mthca_dev *dev)
43525+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
43526 {
43527 int err;
43528
43529diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
43530index ed9a989..6aa5dc2 100644
43531--- a/drivers/infiniband/hw/mthca/mthca_mr.c
43532+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
43533@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
43534 * through the bitmaps)
43535 */
43536
43537-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43538+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43539 {
43540 int o;
43541 int m;
43542@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
43543 return key;
43544 }
43545
43546-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43547+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43548 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
43549 {
43550 struct mthca_mailbox *mailbox;
43551@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
43552 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
43553 }
43554
43555-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43556+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43557 u64 *buffer_list, int buffer_size_shift,
43558 int list_len, u64 iova, u64 total_size,
43559 u32 access, struct mthca_mr *mr)
43560diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
43561index 415f8e1..e34214e 100644
43562--- a/drivers/infiniband/hw/mthca/mthca_provider.c
43563+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43564@@ -764,7 +764,7 @@ unlock:
43565 return 0;
43566 }
43567
43568-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43569+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43570 {
43571 struct mthca_dev *dev = to_mdev(ibcq->device);
43572 struct mthca_cq *cq = to_mcq(ibcq);
43573diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43574index 3b2a6dc..bce26ff 100644
43575--- a/drivers/infiniband/hw/nes/nes.c
43576+++ b/drivers/infiniband/hw/nes/nes.c
43577@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43578 LIST_HEAD(nes_adapter_list);
43579 static LIST_HEAD(nes_dev_list);
43580
43581-atomic_t qps_destroyed;
43582+atomic_unchecked_t qps_destroyed;
43583
43584 static unsigned int ee_flsh_adapter;
43585 static unsigned int sysfs_nonidx_addr;
43586@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43587 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43588 struct nes_adapter *nesadapter = nesdev->nesadapter;
43589
43590- atomic_inc(&qps_destroyed);
43591+ atomic_inc_unchecked(&qps_destroyed);
43592
43593 /* Free the control structures */
43594
43595diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43596index bd9d132..70d84f4 100644
43597--- a/drivers/infiniband/hw/nes/nes.h
43598+++ b/drivers/infiniband/hw/nes/nes.h
43599@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43600 extern unsigned int wqm_quanta;
43601 extern struct list_head nes_adapter_list;
43602
43603-extern atomic_t cm_connects;
43604-extern atomic_t cm_accepts;
43605-extern atomic_t cm_disconnects;
43606-extern atomic_t cm_closes;
43607-extern atomic_t cm_connecteds;
43608-extern atomic_t cm_connect_reqs;
43609-extern atomic_t cm_rejects;
43610-extern atomic_t mod_qp_timouts;
43611-extern atomic_t qps_created;
43612-extern atomic_t qps_destroyed;
43613-extern atomic_t sw_qps_destroyed;
43614+extern atomic_unchecked_t cm_connects;
43615+extern atomic_unchecked_t cm_accepts;
43616+extern atomic_unchecked_t cm_disconnects;
43617+extern atomic_unchecked_t cm_closes;
43618+extern atomic_unchecked_t cm_connecteds;
43619+extern atomic_unchecked_t cm_connect_reqs;
43620+extern atomic_unchecked_t cm_rejects;
43621+extern atomic_unchecked_t mod_qp_timouts;
43622+extern atomic_unchecked_t qps_created;
43623+extern atomic_unchecked_t qps_destroyed;
43624+extern atomic_unchecked_t sw_qps_destroyed;
43625 extern u32 mh_detected;
43626 extern u32 mh_pauses_sent;
43627 extern u32 cm_packets_sent;
43628@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43629 extern u32 cm_packets_received;
43630 extern u32 cm_packets_dropped;
43631 extern u32 cm_packets_retrans;
43632-extern atomic_t cm_listens_created;
43633-extern atomic_t cm_listens_destroyed;
43634+extern atomic_unchecked_t cm_listens_created;
43635+extern atomic_unchecked_t cm_listens_destroyed;
43636 extern u32 cm_backlog_drops;
43637-extern atomic_t cm_loopbacks;
43638-extern atomic_t cm_nodes_created;
43639-extern atomic_t cm_nodes_destroyed;
43640-extern atomic_t cm_accel_dropped_pkts;
43641-extern atomic_t cm_resets_recvd;
43642-extern atomic_t pau_qps_created;
43643-extern atomic_t pau_qps_destroyed;
43644+extern atomic_unchecked_t cm_loopbacks;
43645+extern atomic_unchecked_t cm_nodes_created;
43646+extern atomic_unchecked_t cm_nodes_destroyed;
43647+extern atomic_unchecked_t cm_accel_dropped_pkts;
43648+extern atomic_unchecked_t cm_resets_recvd;
43649+extern atomic_unchecked_t pau_qps_created;
43650+extern atomic_unchecked_t pau_qps_destroyed;
43651
43652 extern u32 int_mod_timer_init;
43653 extern u32 int_mod_cq_depth_256;
43654diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43655index 6f09a72..cf4399d 100644
43656--- a/drivers/infiniband/hw/nes/nes_cm.c
43657+++ b/drivers/infiniband/hw/nes/nes_cm.c
43658@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43659 u32 cm_packets_retrans;
43660 u32 cm_packets_created;
43661 u32 cm_packets_received;
43662-atomic_t cm_listens_created;
43663-atomic_t cm_listens_destroyed;
43664+atomic_unchecked_t cm_listens_created;
43665+atomic_unchecked_t cm_listens_destroyed;
43666 u32 cm_backlog_drops;
43667-atomic_t cm_loopbacks;
43668-atomic_t cm_nodes_created;
43669-atomic_t cm_nodes_destroyed;
43670-atomic_t cm_accel_dropped_pkts;
43671-atomic_t cm_resets_recvd;
43672+atomic_unchecked_t cm_loopbacks;
43673+atomic_unchecked_t cm_nodes_created;
43674+atomic_unchecked_t cm_nodes_destroyed;
43675+atomic_unchecked_t cm_accel_dropped_pkts;
43676+atomic_unchecked_t cm_resets_recvd;
43677
43678 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43679 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43680@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43681 /* instance of function pointers for client API */
43682 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43683 static struct nes_cm_ops nes_cm_api = {
43684- mini_cm_accelerated,
43685- mini_cm_listen,
43686- mini_cm_del_listen,
43687- mini_cm_connect,
43688- mini_cm_close,
43689- mini_cm_accept,
43690- mini_cm_reject,
43691- mini_cm_recv_pkt,
43692- mini_cm_dealloc_core,
43693- mini_cm_get,
43694- mini_cm_set
43695+ .accelerated = mini_cm_accelerated,
43696+ .listen = mini_cm_listen,
43697+ .stop_listener = mini_cm_del_listen,
43698+ .connect = mini_cm_connect,
43699+ .close = mini_cm_close,
43700+ .accept = mini_cm_accept,
43701+ .reject = mini_cm_reject,
43702+ .recv_pkt = mini_cm_recv_pkt,
43703+ .destroy_cm_core = mini_cm_dealloc_core,
43704+ .get = mini_cm_get,
43705+ .set = mini_cm_set
43706 };
43707
43708 static struct nes_cm_core *g_cm_core;
43709
43710-atomic_t cm_connects;
43711-atomic_t cm_accepts;
43712-atomic_t cm_disconnects;
43713-atomic_t cm_closes;
43714-atomic_t cm_connecteds;
43715-atomic_t cm_connect_reqs;
43716-atomic_t cm_rejects;
43717+atomic_unchecked_t cm_connects;
43718+atomic_unchecked_t cm_accepts;
43719+atomic_unchecked_t cm_disconnects;
43720+atomic_unchecked_t cm_closes;
43721+atomic_unchecked_t cm_connecteds;
43722+atomic_unchecked_t cm_connect_reqs;
43723+atomic_unchecked_t cm_rejects;
43724
43725 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43726 {
43727@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43728 kfree(listener);
43729 listener = NULL;
43730 ret = 0;
43731- atomic_inc(&cm_listens_destroyed);
43732+ atomic_inc_unchecked(&cm_listens_destroyed);
43733 } else {
43734 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43735 }
43736@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43737 cm_node->rem_mac);
43738
43739 add_hte_node(cm_core, cm_node);
43740- atomic_inc(&cm_nodes_created);
43741+ atomic_inc_unchecked(&cm_nodes_created);
43742
43743 return cm_node;
43744 }
43745@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43746 }
43747
43748 atomic_dec(&cm_core->node_cnt);
43749- atomic_inc(&cm_nodes_destroyed);
43750+ atomic_inc_unchecked(&cm_nodes_destroyed);
43751 nesqp = cm_node->nesqp;
43752 if (nesqp) {
43753 nesqp->cm_node = NULL;
43754@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43755
43756 static void drop_packet(struct sk_buff *skb)
43757 {
43758- atomic_inc(&cm_accel_dropped_pkts);
43759+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43760 dev_kfree_skb_any(skb);
43761 }
43762
43763@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43764 {
43765
43766 int reset = 0; /* whether to send reset in case of err.. */
43767- atomic_inc(&cm_resets_recvd);
43768+ atomic_inc_unchecked(&cm_resets_recvd);
43769 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43770 " refcnt=%d\n", cm_node, cm_node->state,
43771 atomic_read(&cm_node->ref_count));
43772@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43773 rem_ref_cm_node(cm_node->cm_core, cm_node);
43774 return NULL;
43775 }
43776- atomic_inc(&cm_loopbacks);
43777+ atomic_inc_unchecked(&cm_loopbacks);
43778 loopbackremotenode->loopbackpartner = cm_node;
43779 loopbackremotenode->tcp_cntxt.rcv_wscale =
43780 NES_CM_DEFAULT_RCV_WND_SCALE;
43781@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43782 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43783 else {
43784 rem_ref_cm_node(cm_core, cm_node);
43785- atomic_inc(&cm_accel_dropped_pkts);
43786+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43787 dev_kfree_skb_any(skb);
43788 }
43789 break;
43790@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43791
43792 if ((cm_id) && (cm_id->event_handler)) {
43793 if (issue_disconn) {
43794- atomic_inc(&cm_disconnects);
43795+ atomic_inc_unchecked(&cm_disconnects);
43796 cm_event.event = IW_CM_EVENT_DISCONNECT;
43797 cm_event.status = disconn_status;
43798 cm_event.local_addr = cm_id->local_addr;
43799@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43800 }
43801
43802 if (issue_close) {
43803- atomic_inc(&cm_closes);
43804+ atomic_inc_unchecked(&cm_closes);
43805 nes_disconnect(nesqp, 1);
43806
43807 cm_id->provider_data = nesqp;
43808@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43809
43810 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43811 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43812- atomic_inc(&cm_accepts);
43813+ atomic_inc_unchecked(&cm_accepts);
43814
43815 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43816 netdev_refcnt_read(nesvnic->netdev));
43817@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43818 struct nes_cm_core *cm_core;
43819 u8 *start_buff;
43820
43821- atomic_inc(&cm_rejects);
43822+ atomic_inc_unchecked(&cm_rejects);
43823 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43824 loopback = cm_node->loopbackpartner;
43825 cm_core = cm_node->cm_core;
43826@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43827 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43828 ntohs(laddr->sin_port));
43829
43830- atomic_inc(&cm_connects);
43831+ atomic_inc_unchecked(&cm_connects);
43832 nesqp->active_conn = 1;
43833
43834 /* cache the cm_id in the qp */
43835@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43836 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43837 return err;
43838 }
43839- atomic_inc(&cm_listens_created);
43840+ atomic_inc_unchecked(&cm_listens_created);
43841 }
43842
43843 cm_id->add_ref(cm_id);
43844@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43845
43846 if (nesqp->destroyed)
43847 return;
43848- atomic_inc(&cm_connecteds);
43849+ atomic_inc_unchecked(&cm_connecteds);
43850 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43851 " local port 0x%04X. jiffies = %lu.\n",
43852 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43853@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43854
43855 cm_id->add_ref(cm_id);
43856 ret = cm_id->event_handler(cm_id, &cm_event);
43857- atomic_inc(&cm_closes);
43858+ atomic_inc_unchecked(&cm_closes);
43859 cm_event.event = IW_CM_EVENT_CLOSE;
43860 cm_event.status = 0;
43861 cm_event.provider_data = cm_id->provider_data;
43862@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43863 return;
43864 cm_id = cm_node->cm_id;
43865
43866- atomic_inc(&cm_connect_reqs);
43867+ atomic_inc_unchecked(&cm_connect_reqs);
43868 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43869 cm_node, cm_id, jiffies);
43870
43871@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43872 return;
43873 cm_id = cm_node->cm_id;
43874
43875- atomic_inc(&cm_connect_reqs);
43876+ atomic_inc_unchecked(&cm_connect_reqs);
43877 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43878 cm_node, cm_id, jiffies);
43879
43880diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43881index 4166452..fc952c3 100644
43882--- a/drivers/infiniband/hw/nes/nes_mgt.c
43883+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43884@@ -40,8 +40,8 @@
43885 #include "nes.h"
43886 #include "nes_mgt.h"
43887
43888-atomic_t pau_qps_created;
43889-atomic_t pau_qps_destroyed;
43890+atomic_unchecked_t pau_qps_created;
43891+atomic_unchecked_t pau_qps_destroyed;
43892
43893 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43894 {
43895@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43896 {
43897 struct sk_buff *skb;
43898 unsigned long flags;
43899- atomic_inc(&pau_qps_destroyed);
43900+ atomic_inc_unchecked(&pau_qps_destroyed);
43901
43902 /* Free packets that have not yet been forwarded */
43903 /* Lock is acquired by skb_dequeue when removing the skb */
43904@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43905 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43906 skb_queue_head_init(&nesqp->pau_list);
43907 spin_lock_init(&nesqp->pau_lock);
43908- atomic_inc(&pau_qps_created);
43909+ atomic_inc_unchecked(&pau_qps_created);
43910 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43911 }
43912
43913diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43914index 70acda9..a96de9d 100644
43915--- a/drivers/infiniband/hw/nes/nes_nic.c
43916+++ b/drivers/infiniband/hw/nes/nes_nic.c
43917@@ -1274,39 +1274,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43918 target_stat_values[++index] = mh_detected;
43919 target_stat_values[++index] = mh_pauses_sent;
43920 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43921- target_stat_values[++index] = atomic_read(&cm_connects);
43922- target_stat_values[++index] = atomic_read(&cm_accepts);
43923- target_stat_values[++index] = atomic_read(&cm_disconnects);
43924- target_stat_values[++index] = atomic_read(&cm_connecteds);
43925- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43926- target_stat_values[++index] = atomic_read(&cm_rejects);
43927- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43928- target_stat_values[++index] = atomic_read(&qps_created);
43929- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43930- target_stat_values[++index] = atomic_read(&qps_destroyed);
43931- target_stat_values[++index] = atomic_read(&cm_closes);
43932+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43933+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43934+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43935+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43936+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43937+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43938+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43939+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43940+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43941+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43942+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43943 target_stat_values[++index] = cm_packets_sent;
43944 target_stat_values[++index] = cm_packets_bounced;
43945 target_stat_values[++index] = cm_packets_created;
43946 target_stat_values[++index] = cm_packets_received;
43947 target_stat_values[++index] = cm_packets_dropped;
43948 target_stat_values[++index] = cm_packets_retrans;
43949- target_stat_values[++index] = atomic_read(&cm_listens_created);
43950- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43951+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43952+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43953 target_stat_values[++index] = cm_backlog_drops;
43954- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43955- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43956- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43957- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43958- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43959+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43960+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43961+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43962+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43963+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43964 target_stat_values[++index] = nesadapter->free_4kpbl;
43965 target_stat_values[++index] = nesadapter->free_256pbl;
43966 target_stat_values[++index] = int_mod_timer_init;
43967 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43968 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43969 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43970- target_stat_values[++index] = atomic_read(&pau_qps_created);
43971- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43972+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43973+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43974 }
43975
43976 /**
43977diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43978index c0d0296..3185f57 100644
43979--- a/drivers/infiniband/hw/nes/nes_verbs.c
43980+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43981@@ -46,9 +46,9 @@
43982
43983 #include <rdma/ib_umem.h>
43984
43985-atomic_t mod_qp_timouts;
43986-atomic_t qps_created;
43987-atomic_t sw_qps_destroyed;
43988+atomic_unchecked_t mod_qp_timouts;
43989+atomic_unchecked_t qps_created;
43990+atomic_unchecked_t sw_qps_destroyed;
43991
43992 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43993
43994@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43995 if (init_attr->create_flags)
43996 return ERR_PTR(-EINVAL);
43997
43998- atomic_inc(&qps_created);
43999+ atomic_inc_unchecked(&qps_created);
44000 switch (init_attr->qp_type) {
44001 case IB_QPT_RC:
44002 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
44003@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
44004 struct iw_cm_event cm_event;
44005 int ret = 0;
44006
44007- atomic_inc(&sw_qps_destroyed);
44008+ atomic_inc_unchecked(&sw_qps_destroyed);
44009 nesqp->destroyed = 1;
44010
44011 /* Blow away the connection if it exists. */
44012diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
44013index ffd48bf..83cdb56 100644
44014--- a/drivers/infiniband/hw/qib/qib.h
44015+++ b/drivers/infiniband/hw/qib/qib.h
44016@@ -52,6 +52,7 @@
44017 #include <linux/kref.h>
44018 #include <linux/sched.h>
44019 #include <linux/kthread.h>
44020+#include <linux/slab.h>
44021
44022 #include "qib_common.h"
44023 #include "qib_verbs.h"
44024diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
44025index cdc7df4..a2fdfdb 100644
44026--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
44027+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
44028@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
44029 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
44030 }
44031
44032-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
44033+static struct rtnl_link_ops ipoib_link_ops = {
44034 .kind = "ipoib",
44035 .maxtype = IFLA_IPOIB_MAX,
44036 .policy = ipoib_policy,
44037diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
44038index e853a21..56fc5a8 100644
44039--- a/drivers/input/gameport/gameport.c
44040+++ b/drivers/input/gameport/gameport.c
44041@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
44042 */
44043 static void gameport_init_port(struct gameport *gameport)
44044 {
44045- static atomic_t gameport_no = ATOMIC_INIT(-1);
44046+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
44047
44048 __module_get(THIS_MODULE);
44049
44050 mutex_init(&gameport->drv_mutex);
44051 device_initialize(&gameport->dev);
44052 dev_set_name(&gameport->dev, "gameport%lu",
44053- (unsigned long)atomic_inc_return(&gameport_no));
44054+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
44055 gameport->dev.bus = &gameport_bus;
44056 gameport->dev.release = gameport_release_port;
44057 if (gameport->parent)
44058diff --git a/drivers/input/input.c b/drivers/input/input.c
44059index cc357f1..ee42fbc 100644
44060--- a/drivers/input/input.c
44061+++ b/drivers/input/input.c
44062@@ -1781,7 +1781,7 @@ EXPORT_SYMBOL_GPL(input_class);
44063 */
44064 struct input_dev *input_allocate_device(void)
44065 {
44066- static atomic_t input_no = ATOMIC_INIT(-1);
44067+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
44068 struct input_dev *dev;
44069
44070 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
44071@@ -1796,7 +1796,7 @@ struct input_dev *input_allocate_device(void)
44072 INIT_LIST_HEAD(&dev->node);
44073
44074 dev_set_name(&dev->dev, "input%lu",
44075- (unsigned long)atomic_inc_return(&input_no));
44076+ (unsigned long)atomic_inc_return_unchecked(&input_no));
44077
44078 __module_get(THIS_MODULE);
44079 }
44080diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
44081index 4a95b22..874c182 100644
44082--- a/drivers/input/joystick/sidewinder.c
44083+++ b/drivers/input/joystick/sidewinder.c
44084@@ -30,6 +30,7 @@
44085 #include <linux/kernel.h>
44086 #include <linux/module.h>
44087 #include <linux/slab.h>
44088+#include <linux/sched.h>
44089 #include <linux/input.h>
44090 #include <linux/gameport.h>
44091 #include <linux/jiffies.h>
44092diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
44093index 3aa2f3f..53c00ea 100644
44094--- a/drivers/input/joystick/xpad.c
44095+++ b/drivers/input/joystick/xpad.c
44096@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
44097
44098 static int xpad_led_probe(struct usb_xpad *xpad)
44099 {
44100- static atomic_t led_seq = ATOMIC_INIT(-1);
44101+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
44102 unsigned long led_no;
44103 struct xpad_led *led;
44104 struct led_classdev *led_cdev;
44105@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
44106 if (!led)
44107 return -ENOMEM;
44108
44109- led_no = atomic_inc_return(&led_seq);
44110+ led_no = atomic_inc_return_unchecked(&led_seq);
44111
44112 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
44113 led->xpad = xpad;
44114diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44115index ac1fa5f..5f7502c 100644
44116--- a/drivers/input/misc/ims-pcu.c
44117+++ b/drivers/input/misc/ims-pcu.c
44118@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44119
44120 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44121 {
44122- static atomic_t device_no = ATOMIC_INIT(-1);
44123+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
44124
44125 const struct ims_pcu_device_info *info;
44126 int error;
44127@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44128 }
44129
44130 /* Device appears to be operable, complete initialization */
44131- pcu->device_no = atomic_inc_return(&device_no);
44132+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
44133
44134 /*
44135 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44136diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44137index d02e1bd..d719719 100644
44138--- a/drivers/input/mouse/psmouse.h
44139+++ b/drivers/input/mouse/psmouse.h
44140@@ -124,7 +124,7 @@ struct psmouse_attribute {
44141 ssize_t (*set)(struct psmouse *psmouse, void *data,
44142 const char *buf, size_t count);
44143 bool protect;
44144-};
44145+} __do_const;
44146 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44147
44148 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44149diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44150index b604564..3f14ae4 100644
44151--- a/drivers/input/mousedev.c
44152+++ b/drivers/input/mousedev.c
44153@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44154
44155 spin_unlock_irq(&client->packet_lock);
44156
44157- if (copy_to_user(buffer, data, count))
44158+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44159 return -EFAULT;
44160
44161 return count;
44162diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44163index a05a517..323a2fd 100644
44164--- a/drivers/input/serio/serio.c
44165+++ b/drivers/input/serio/serio.c
44166@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44167 */
44168 static void serio_init_port(struct serio *serio)
44169 {
44170- static atomic_t serio_no = ATOMIC_INIT(-1);
44171+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
44172
44173 __module_get(THIS_MODULE);
44174
44175@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
44176 mutex_init(&serio->drv_mutex);
44177 device_initialize(&serio->dev);
44178 dev_set_name(&serio->dev, "serio%lu",
44179- (unsigned long)atomic_inc_return(&serio_no));
44180+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
44181 serio->dev.bus = &serio_bus;
44182 serio->dev.release = serio_release_port;
44183 serio->dev.groups = serio_device_attr_groups;
44184diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
44185index 71ef5d6..93380a9 100644
44186--- a/drivers/input/serio/serio_raw.c
44187+++ b/drivers/input/serio/serio_raw.c
44188@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
44189
44190 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44191 {
44192- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
44193+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
44194 struct serio_raw *serio_raw;
44195 int err;
44196
44197@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44198 }
44199
44200 snprintf(serio_raw->name, sizeof(serio_raw->name),
44201- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
44202+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
44203 kref_init(&serio_raw->kref);
44204 INIT_LIST_HEAD(&serio_raw->client_list);
44205 init_waitqueue_head(&serio_raw->wait);
44206diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
44207index 92e2243..8fd9092 100644
44208--- a/drivers/input/touchscreen/htcpen.c
44209+++ b/drivers/input/touchscreen/htcpen.c
44210@@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = {
44211 }
44212 };
44213
44214-static struct dmi_system_id htcshift_dmi_table[] __initdata = {
44215+static const struct dmi_system_id htcshift_dmi_table[] __initconst = {
44216 {
44217 .ident = "Shift",
44218 .matches = {
44219diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
44220index 48882c1..93e0987 100644
44221--- a/drivers/iommu/amd_iommu.c
44222+++ b/drivers/iommu/amd_iommu.c
44223@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
44224
44225 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
44226 {
44227+ phys_addr_t physaddr;
44228 WARN_ON(address & 0x7ULL);
44229
44230 memset(cmd, 0, sizeof(*cmd));
44231- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
44232- cmd->data[1] = upper_32_bits(__pa(address));
44233+
44234+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
44235+ if (object_starts_on_stack((void *)address)) {
44236+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
44237+ physaddr = __pa((u64)adjbuf);
44238+ } else
44239+#endif
44240+ physaddr = __pa(address);
44241+
44242+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
44243+ cmd->data[1] = upper_32_bits(physaddr);
44244 cmd->data[2] = 1;
44245 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
44246 }
44247diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44248index a3adde6..988ee96 100644
44249--- a/drivers/iommu/arm-smmu.c
44250+++ b/drivers/iommu/arm-smmu.c
44251@@ -338,7 +338,7 @@ enum arm_smmu_domain_stage {
44252
44253 struct arm_smmu_domain {
44254 struct arm_smmu_device *smmu;
44255- struct io_pgtable_ops *pgtbl_ops;
44256+ struct io_pgtable *pgtbl;
44257 spinlock_t pgtbl_lock;
44258 struct arm_smmu_cfg cfg;
44259 enum arm_smmu_domain_stage stage;
44260@@ -833,7 +833,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44261 {
44262 int irq, start, ret = 0;
44263 unsigned long ias, oas;
44264- struct io_pgtable_ops *pgtbl_ops;
44265+ struct io_pgtable *pgtbl;
44266 struct io_pgtable_cfg pgtbl_cfg;
44267 enum io_pgtable_fmt fmt;
44268 struct arm_smmu_domain *smmu_domain = domain->priv;
44269@@ -918,14 +918,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44270 };
44271
44272 smmu_domain->smmu = smmu;
44273- pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
44274- if (!pgtbl_ops) {
44275+ pgtbl = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain);
44276+ if (!pgtbl) {
44277 ret = -ENOMEM;
44278 goto out_clear_smmu;
44279 }
44280
44281 /* Update our support page sizes to reflect the page table format */
44282- arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44283+ pax_open_kernel();
44284+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44285+ pax_close_kernel();
44286
44287 /* Initialise the context bank with our page table cfg */
44288 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
44289@@ -946,7 +948,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44290 mutex_unlock(&smmu_domain->init_mutex);
44291
44292 /* Publish page table ops for map/unmap */
44293- smmu_domain->pgtbl_ops = pgtbl_ops;
44294+ smmu_domain->pgtbl = pgtbl;
44295 return 0;
44296
44297 out_clear_smmu:
44298@@ -979,8 +981,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
44299 free_irq(irq, domain);
44300 }
44301
44302- if (smmu_domain->pgtbl_ops)
44303- free_io_pgtable_ops(smmu_domain->pgtbl_ops);
44304+ free_io_pgtable(smmu_domain->pgtbl);
44305
44306 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
44307 }
44308@@ -1204,13 +1205,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
44309 int ret;
44310 unsigned long flags;
44311 struct arm_smmu_domain *smmu_domain = domain->priv;
44312- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44313+ struct io_pgtable *iop = smmu_domain->pgtbl;
44314
44315- if (!ops)
44316+ if (!iop)
44317 return -ENODEV;
44318
44319 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44320- ret = ops->map(ops, iova, paddr, size, prot);
44321+ ret = iop->ops->map(iop, iova, paddr, size, prot);
44322 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44323 return ret;
44324 }
44325@@ -1221,13 +1222,13 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
44326 size_t ret;
44327 unsigned long flags;
44328 struct arm_smmu_domain *smmu_domain = domain->priv;
44329- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44330+ struct io_pgtable *iop = smmu_domain->pgtbl;
44331
44332- if (!ops)
44333+ if (!iop)
44334 return 0;
44335
44336 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44337- ret = ops->unmap(ops, iova, size);
44338+ ret = iop->ops->unmap(iop, iova, size);
44339 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44340 return ret;
44341 }
44342@@ -1238,7 +1239,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44343 struct arm_smmu_domain *smmu_domain = domain->priv;
44344 struct arm_smmu_device *smmu = smmu_domain->smmu;
44345 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
44346- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44347+ struct io_pgtable *iop = smmu_domain->pgtbl;
44348 struct device *dev = smmu->dev;
44349 void __iomem *cb_base;
44350 u32 tmp;
44351@@ -1261,7 +1262,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44352 dev_err(dev,
44353 "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
44354 &iova);
44355- return ops->iova_to_phys(ops, iova);
44356+ return iop->ops->iova_to_phys(iop, iova);
44357 }
44358
44359 phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
44360@@ -1282,9 +1283,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44361 phys_addr_t ret;
44362 unsigned long flags;
44363 struct arm_smmu_domain *smmu_domain = domain->priv;
44364- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44365+ struct io_pgtable *iop = smmu_domain->pgtbl;
44366
44367- if (!ops)
44368+ if (!iop)
44369 return 0;
44370
44371 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44372@@ -1292,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44373 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
44374 ret = arm_smmu_iova_to_phys_hard(domain, iova);
44375 } else {
44376- ret = ops->iova_to_phys(ops, iova);
44377+ ret = iop->ops->iova_to_phys(iop, iova);
44378 }
44379
44380 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44381@@ -1651,7 +1652,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
44382 size |= SZ_64K | SZ_512M;
44383 }
44384
44385- arm_smmu_ops.pgsize_bitmap &= size;
44386+ pax_open_kernel();
44387+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap &= size;
44388+ pax_close_kernel();
44389 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
44390
44391 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
44392diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
44393index b610a8d..08eb879 100644
44394--- a/drivers/iommu/io-pgtable-arm.c
44395+++ b/drivers/iommu/io-pgtable-arm.c
44396@@ -36,12 +36,6 @@
44397 #define io_pgtable_to_data(x) \
44398 container_of((x), struct arm_lpae_io_pgtable, iop)
44399
44400-#define io_pgtable_ops_to_pgtable(x) \
44401- container_of((x), struct io_pgtable, ops)
44402-
44403-#define io_pgtable_ops_to_data(x) \
44404- io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44405-
44406 /*
44407 * For consistency with the architecture, we always consider
44408 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
44409@@ -302,10 +296,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
44410 return pte;
44411 }
44412
44413-static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
44414+static int arm_lpae_map(struct io_pgtable *iop, unsigned long iova,
44415 phys_addr_t paddr, size_t size, int iommu_prot)
44416 {
44417- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44418+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44419 arm_lpae_iopte *ptep = data->pgd;
44420 int lvl = ARM_LPAE_START_LVL(data);
44421 arm_lpae_iopte prot;
44422@@ -445,12 +439,11 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
44423 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
44424 }
44425
44426-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44427+static int arm_lpae_unmap(struct io_pgtable *iop, unsigned long iova,
44428 size_t size)
44429 {
44430 size_t unmapped;
44431- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44432- struct io_pgtable *iop = &data->iop;
44433+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44434 arm_lpae_iopte *ptep = data->pgd;
44435 int lvl = ARM_LPAE_START_LVL(data);
44436
44437@@ -461,10 +454,10 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44438 return unmapped;
44439 }
44440
44441-static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
44442+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable *iop,
44443 unsigned long iova)
44444 {
44445- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44446+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44447 arm_lpae_iopte pte, *ptep = data->pgd;
44448 int lvl = ARM_LPAE_START_LVL(data);
44449
44450@@ -531,6 +524,12 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
44451 }
44452 }
44453
44454+static struct io_pgtable_ops arm_lpae_io_pgtable_ops = {
44455+ .map = arm_lpae_map,
44456+ .unmap = arm_lpae_unmap,
44457+ .iova_to_phys = arm_lpae_iova_to_phys,
44458+};
44459+
44460 static struct arm_lpae_io_pgtable *
44461 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44462 {
44463@@ -562,11 +561,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44464 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
44465 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
44466
44467- data->iop.ops = (struct io_pgtable_ops) {
44468- .map = arm_lpae_map,
44469- .unmap = arm_lpae_unmap,
44470- .iova_to_phys = arm_lpae_iova_to_phys,
44471- };
44472+ data->iop.ops = &arm_lpae_io_pgtable_ops;
44473
44474 return data;
44475 }
44476@@ -825,9 +820,9 @@ static struct iommu_gather_ops dummy_tlb_ops __initdata = {
44477 .flush_pgtable = dummy_flush_pgtable,
44478 };
44479
44480-static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44481+static void __init arm_lpae_dump_ops(struct io_pgtable *iop)
44482 {
44483- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44484+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44485 struct io_pgtable_cfg *cfg = &data->iop.cfg;
44486
44487 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
44488@@ -837,9 +832,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44489 data->bits_per_level, data->pgd);
44490 }
44491
44492-#define __FAIL(ops, i) ({ \
44493+#define __FAIL(iop, i) ({ \
44494 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
44495- arm_lpae_dump_ops(ops); \
44496+ arm_lpae_dump_ops(iop); \
44497 selftest_running = false; \
44498 -EFAULT; \
44499 })
44500@@ -854,30 +849,32 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44501 int i, j;
44502 unsigned long iova;
44503 size_t size;
44504- struct io_pgtable_ops *ops;
44505+ struct io_pgtable *iop;
44506+ const struct io_pgtable_ops *ops;
44507
44508 selftest_running = true;
44509
44510 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
44511 cfg_cookie = cfg;
44512- ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
44513- if (!ops) {
44514+ iop = alloc_io_pgtable(fmts[i], cfg, cfg);
44515+ if (!iop) {
44516 pr_err("selftest: failed to allocate io pgtable ops\n");
44517 return -ENOMEM;
44518 }
44519+ ops = iop->ops;
44520
44521 /*
44522 * Initial sanity checks.
44523 * Empty page tables shouldn't provide any translations.
44524 */
44525- if (ops->iova_to_phys(ops, 42))
44526- return __FAIL(ops, i);
44527+ if (ops->iova_to_phys(iop, 42))
44528+ return __FAIL(iop, i);
44529
44530- if (ops->iova_to_phys(ops, SZ_1G + 42))
44531- return __FAIL(ops, i);
44532+ if (ops->iova_to_phys(iop, SZ_1G + 42))
44533+ return __FAIL(iop, i);
44534
44535- if (ops->iova_to_phys(ops, SZ_2G + 42))
44536- return __FAIL(ops, i);
44537+ if (ops->iova_to_phys(iop, SZ_2G + 42))
44538+ return __FAIL(iop, i);
44539
44540 /*
44541 * Distinct mappings of different granule sizes.
44542@@ -887,19 +884,19 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44543 while (j != BITS_PER_LONG) {
44544 size = 1UL << j;
44545
44546- if (ops->map(ops, iova, iova, size, IOMMU_READ |
44547+ if (ops->map(iop, iova, iova, size, IOMMU_READ |
44548 IOMMU_WRITE |
44549 IOMMU_NOEXEC |
44550 IOMMU_CACHE))
44551- return __FAIL(ops, i);
44552+ return __FAIL(iop, i);
44553
44554 /* Overlapping mappings */
44555- if (!ops->map(ops, iova, iova + size, size,
44556+ if (!ops->map(iop, iova, iova + size, size,
44557 IOMMU_READ | IOMMU_NOEXEC))
44558- return __FAIL(ops, i);
44559+ return __FAIL(iop, i);
44560
44561- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44562- return __FAIL(ops, i);
44563+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44564+ return __FAIL(iop, i);
44565
44566 iova += SZ_1G;
44567 j++;
44568@@ -908,15 +905,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44569
44570 /* Partial unmap */
44571 size = 1UL << __ffs(cfg->pgsize_bitmap);
44572- if (ops->unmap(ops, SZ_1G + size, size) != size)
44573- return __FAIL(ops, i);
44574+ if (ops->unmap(iop, SZ_1G + size, size) != size)
44575+ return __FAIL(iop, i);
44576
44577 /* Remap of partial unmap */
44578- if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
44579- return __FAIL(ops, i);
44580+ if (ops->map(iop, SZ_1G + size, size, size, IOMMU_READ))
44581+ return __FAIL(iop, i);
44582
44583- if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
44584- return __FAIL(ops, i);
44585+ if (ops->iova_to_phys(iop, SZ_1G + size + 42) != (size + 42))
44586+ return __FAIL(iop, i);
44587
44588 /* Full unmap */
44589 iova = 0;
44590@@ -924,25 +921,25 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44591 while (j != BITS_PER_LONG) {
44592 size = 1UL << j;
44593
44594- if (ops->unmap(ops, iova, size) != size)
44595- return __FAIL(ops, i);
44596+ if (ops->unmap(iop, iova, size) != size)
44597+ return __FAIL(iop, i);
44598
44599- if (ops->iova_to_phys(ops, iova + 42))
44600- return __FAIL(ops, i);
44601+ if (ops->iova_to_phys(iop, iova + 42))
44602+ return __FAIL(iop, i);
44603
44604 /* Remap full block */
44605- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
44606- return __FAIL(ops, i);
44607+ if (ops->map(iop, iova, iova, size, IOMMU_WRITE))
44608+ return __FAIL(iop, i);
44609
44610- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44611- return __FAIL(ops, i);
44612+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44613+ return __FAIL(iop, i);
44614
44615 iova += SZ_1G;
44616 j++;
44617 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
44618 }
44619
44620- free_io_pgtable_ops(ops);
44621+ free_io_pgtable(iop);
44622 }
44623
44624 selftest_running = false;
44625diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
44626index 6436fe2..088c965 100644
44627--- a/drivers/iommu/io-pgtable.c
44628+++ b/drivers/iommu/io-pgtable.c
44629@@ -40,7 +40,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
44630 #endif
44631 };
44632
44633-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44634+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44635 struct io_pgtable_cfg *cfg,
44636 void *cookie)
44637 {
44638@@ -62,21 +62,18 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44639 iop->cookie = cookie;
44640 iop->cfg = *cfg;
44641
44642- return &iop->ops;
44643+ return iop;
44644 }
44645
44646 /*
44647 * It is the IOMMU driver's responsibility to ensure that the page table
44648 * is no longer accessible to the walker by this point.
44649 */
44650-void free_io_pgtable_ops(struct io_pgtable_ops *ops)
44651+void free_io_pgtable(struct io_pgtable *iop)
44652 {
44653- struct io_pgtable *iop;
44654-
44655- if (!ops)
44656+ if (!iop)
44657 return;
44658
44659- iop = container_of(ops, struct io_pgtable, ops);
44660 iop->cfg.tlb->tlb_flush_all(iop->cookie);
44661 io_pgtable_init_table[iop->fmt]->free(iop);
44662 }
44663diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
44664index 10e32f6..0b276c8 100644
44665--- a/drivers/iommu/io-pgtable.h
44666+++ b/drivers/iommu/io-pgtable.h
44667@@ -75,17 +75,18 @@ struct io_pgtable_cfg {
44668 * These functions map directly onto the iommu_ops member functions with
44669 * the same names.
44670 */
44671+struct io_pgtable;
44672 struct io_pgtable_ops {
44673- int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
44674+ int (*map)(struct io_pgtable *iop, unsigned long iova,
44675 phys_addr_t paddr, size_t size, int prot);
44676- int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
44677+ int (*unmap)(struct io_pgtable *iop, unsigned long iova,
44678 size_t size);
44679- phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
44680+ phys_addr_t (*iova_to_phys)(struct io_pgtable *iop,
44681 unsigned long iova);
44682 };
44683
44684 /**
44685- * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
44686+ * alloc_io_pgtable() - Allocate a page table allocator for use by an IOMMU.
44687 *
44688 * @fmt: The page table format.
44689 * @cfg: The page table configuration. This will be modified to represent
44690@@ -94,9 +95,9 @@ struct io_pgtable_ops {
44691 * @cookie: An opaque token provided by the IOMMU driver and passed back to
44692 * the callback routines in cfg->tlb.
44693 */
44694-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44695- struct io_pgtable_cfg *cfg,
44696- void *cookie);
44697+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44698+ struct io_pgtable_cfg *cfg,
44699+ void *cookie);
44700
44701 /**
44702 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
44703@@ -105,7 +106,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44704 *
44705 * @ops: The ops returned from alloc_io_pgtable_ops.
44706 */
44707-void free_io_pgtable_ops(struct io_pgtable_ops *ops);
44708+void free_io_pgtable(struct io_pgtable *iop);
44709
44710
44711 /*
44712@@ -125,7 +126,7 @@ struct io_pgtable {
44713 enum io_pgtable_fmt fmt;
44714 void *cookie;
44715 struct io_pgtable_cfg cfg;
44716- struct io_pgtable_ops ops;
44717+ const struct io_pgtable_ops *ops;
44718 };
44719
44720 /**
44721diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
44722index 72e683d..c9db262 100644
44723--- a/drivers/iommu/iommu.c
44724+++ b/drivers/iommu/iommu.c
44725@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
44726 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
44727 {
44728 int err;
44729- struct notifier_block *nb;
44730+ notifier_block_no_const *nb;
44731 struct iommu_callback_data cb = {
44732 .ops = ops,
44733 };
44734diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
44735index bc39bdf..e2de272 100644
44736--- a/drivers/iommu/ipmmu-vmsa.c
44737+++ b/drivers/iommu/ipmmu-vmsa.c
44738@@ -41,7 +41,7 @@ struct ipmmu_vmsa_domain {
44739 struct iommu_domain *io_domain;
44740
44741 struct io_pgtable_cfg cfg;
44742- struct io_pgtable_ops *iop;
44743+ struct io_pgtable *iop;
44744
44745 unsigned int context_id;
44746 spinlock_t lock; /* Protects mappings */
44747@@ -323,8 +323,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
44748 domain->cfg.oas = 40;
44749 domain->cfg.tlb = &ipmmu_gather_ops;
44750
44751- domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
44752- domain);
44753+ domain->iop = alloc_io_pgtable(ARM_32_LPAE_S1, &domain->cfg, domain);
44754 if (!domain->iop)
44755 return -EINVAL;
44756
44757@@ -482,7 +481,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
44758 * been detached.
44759 */
44760 ipmmu_domain_destroy_context(domain);
44761- free_io_pgtable_ops(domain->iop);
44762+ free_io_pgtable(domain->iop);
44763 kfree(domain);
44764 }
44765
44766@@ -551,7 +550,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
44767 if (!domain)
44768 return -ENODEV;
44769
44770- return domain->iop->map(domain->iop, iova, paddr, size, prot);
44771+ return domain->iop->ops->map(domain->iop, iova, paddr, size, prot);
44772 }
44773
44774 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44775@@ -559,7 +558,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44776 {
44777 struct ipmmu_vmsa_domain *domain = io_domain->priv;
44778
44779- return domain->iop->unmap(domain->iop, iova, size);
44780+ return domain->iop->ops->unmap(domain->iop, iova, size);
44781 }
44782
44783 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44784@@ -569,7 +568,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44785
44786 /* TODO: Is locking needed ? */
44787
44788- return domain->iop->iova_to_phys(domain->iop, iova);
44789+ return domain->iop->ops->iova_to_phys(domain->iop, iova);
44790 }
44791
44792 static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
44793diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44794index 390079e..1da9d6c 100644
44795--- a/drivers/iommu/irq_remapping.c
44796+++ b/drivers/iommu/irq_remapping.c
44797@@ -329,7 +329,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44798 void panic_if_irq_remap(const char *msg)
44799 {
44800 if (irq_remapping_enabled)
44801- panic(msg);
44802+ panic("%s", msg);
44803 }
44804
44805 static void ir_ack_apic_edge(struct irq_data *data)
44806@@ -350,10 +350,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44807
44808 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44809 {
44810- chip->irq_print_chip = ir_print_prefix;
44811- chip->irq_ack = ir_ack_apic_edge;
44812- chip->irq_eoi = ir_ack_apic_level;
44813- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44814+ pax_open_kernel();
44815+ *(void **)&chip->irq_print_chip = ir_print_prefix;
44816+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
44817+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
44818+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44819+ pax_close_kernel();
44820 }
44821
44822 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
44823diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44824index 471e1cd..b53b870 100644
44825--- a/drivers/irqchip/irq-gic.c
44826+++ b/drivers/irqchip/irq-gic.c
44827@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
44828 * Supported arch specific GIC irq extension.
44829 * Default make them NULL.
44830 */
44831-struct irq_chip gic_arch_extn = {
44832+irq_chip_no_const gic_arch_extn = {
44833 .irq_eoi = NULL,
44834 .irq_mask = NULL,
44835 .irq_unmask = NULL,
44836@@ -318,7 +318,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44837 chained_irq_exit(chip, desc);
44838 }
44839
44840-static struct irq_chip gic_chip = {
44841+static irq_chip_no_const gic_chip __read_only = {
44842 .name = "GIC",
44843 .irq_mask = gic_mask_irq,
44844 .irq_unmask = gic_unmask_irq,
44845diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
44846index 9a0767b..5e5f86f 100644
44847--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
44848+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
44849@@ -373,7 +373,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
44850 struct intc_irqpin_iomem *i;
44851 struct resource *io[INTC_IRQPIN_REG_NR];
44852 struct resource *irq;
44853- struct irq_chip *irq_chip;
44854+ irq_chip_no_const *irq_chip;
44855 void (*enable_fn)(struct irq_data *d);
44856 void (*disable_fn)(struct irq_data *d);
44857 const char *name = dev_name(dev);
44858diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44859index 384e6ed..7a771b2 100644
44860--- a/drivers/irqchip/irq-renesas-irqc.c
44861+++ b/drivers/irqchip/irq-renesas-irqc.c
44862@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
44863 struct irqc_priv *p;
44864 struct resource *io;
44865 struct resource *irq;
44866- struct irq_chip *irq_chip;
44867+ irq_chip_no_const *irq_chip;
44868 const char *name = dev_name(&pdev->dev);
44869 int ret;
44870 int k;
44871diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44872index 6a2df32..dc962f1 100644
44873--- a/drivers/isdn/capi/capi.c
44874+++ b/drivers/isdn/capi/capi.c
44875@@ -81,8 +81,8 @@ struct capiminor {
44876
44877 struct capi20_appl *ap;
44878 u32 ncci;
44879- atomic_t datahandle;
44880- atomic_t msgid;
44881+ atomic_unchecked_t datahandle;
44882+ atomic_unchecked_t msgid;
44883
44884 struct tty_port port;
44885 int ttyinstop;
44886@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44887 capimsg_setu16(s, 2, mp->ap->applid);
44888 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44889 capimsg_setu8 (s, 5, CAPI_RESP);
44890- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44891+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44892 capimsg_setu32(s, 8, mp->ncci);
44893 capimsg_setu16(s, 12, datahandle);
44894 }
44895@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44896 mp->outbytes -= len;
44897 spin_unlock_bh(&mp->outlock);
44898
44899- datahandle = atomic_inc_return(&mp->datahandle);
44900+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44901 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44902 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44903 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44904 capimsg_setu16(skb->data, 2, mp->ap->applid);
44905 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44906 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44907- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44908+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44909 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44910 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44911 capimsg_setu16(skb->data, 16, len); /* Data length */
44912diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44913index aecec6d..11e13c5 100644
44914--- a/drivers/isdn/gigaset/bas-gigaset.c
44915+++ b/drivers/isdn/gigaset/bas-gigaset.c
44916@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44917
44918
44919 static const struct gigaset_ops gigops = {
44920- gigaset_write_cmd,
44921- gigaset_write_room,
44922- gigaset_chars_in_buffer,
44923- gigaset_brkchars,
44924- gigaset_init_bchannel,
44925- gigaset_close_bchannel,
44926- gigaset_initbcshw,
44927- gigaset_freebcshw,
44928- gigaset_reinitbcshw,
44929- gigaset_initcshw,
44930- gigaset_freecshw,
44931- gigaset_set_modem_ctrl,
44932- gigaset_baud_rate,
44933- gigaset_set_line_ctrl,
44934- gigaset_isoc_send_skb,
44935- gigaset_isoc_input,
44936+ .write_cmd = gigaset_write_cmd,
44937+ .write_room = gigaset_write_room,
44938+ .chars_in_buffer = gigaset_chars_in_buffer,
44939+ .brkchars = gigaset_brkchars,
44940+ .init_bchannel = gigaset_init_bchannel,
44941+ .close_bchannel = gigaset_close_bchannel,
44942+ .initbcshw = gigaset_initbcshw,
44943+ .freebcshw = gigaset_freebcshw,
44944+ .reinitbcshw = gigaset_reinitbcshw,
44945+ .initcshw = gigaset_initcshw,
44946+ .freecshw = gigaset_freecshw,
44947+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44948+ .baud_rate = gigaset_baud_rate,
44949+ .set_line_ctrl = gigaset_set_line_ctrl,
44950+ .send_skb = gigaset_isoc_send_skb,
44951+ .handle_input = gigaset_isoc_input,
44952 };
44953
44954 /* bas_gigaset_init
44955diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
44956index 600c79b..3752bab 100644
44957--- a/drivers/isdn/gigaset/interface.c
44958+++ b/drivers/isdn/gigaset/interface.c
44959@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
44960 }
44961 tty->driver_data = cs;
44962
44963- ++cs->port.count;
44964+ atomic_inc(&cs->port.count);
44965
44966- if (cs->port.count == 1) {
44967+ if (atomic_read(&cs->port.count) == 1) {
44968 tty_port_tty_set(&cs->port, tty);
44969 cs->port.low_latency = 1;
44970 }
44971@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
44972
44973 if (!cs->connected)
44974 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
44975- else if (!cs->port.count)
44976+ else if (!atomic_read(&cs->port.count))
44977 dev_warn(cs->dev, "%s: device not opened\n", __func__);
44978- else if (!--cs->port.count)
44979+ else if (!atomic_dec_return(&cs->port.count))
44980 tty_port_tty_set(&cs->port, NULL);
44981
44982 mutex_unlock(&cs->mutex);
44983diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
44984index 8c91fd5..14f13ce 100644
44985--- a/drivers/isdn/gigaset/ser-gigaset.c
44986+++ b/drivers/isdn/gigaset/ser-gigaset.c
44987@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
44988 }
44989
44990 static const struct gigaset_ops ops = {
44991- gigaset_write_cmd,
44992- gigaset_write_room,
44993- gigaset_chars_in_buffer,
44994- gigaset_brkchars,
44995- gigaset_init_bchannel,
44996- gigaset_close_bchannel,
44997- gigaset_initbcshw,
44998- gigaset_freebcshw,
44999- gigaset_reinitbcshw,
45000- gigaset_initcshw,
45001- gigaset_freecshw,
45002- gigaset_set_modem_ctrl,
45003- gigaset_baud_rate,
45004- gigaset_set_line_ctrl,
45005- gigaset_m10x_send_skb, /* asyncdata.c */
45006- gigaset_m10x_input, /* asyncdata.c */
45007+ .write_cmd = gigaset_write_cmd,
45008+ .write_room = gigaset_write_room,
45009+ .chars_in_buffer = gigaset_chars_in_buffer,
45010+ .brkchars = gigaset_brkchars,
45011+ .init_bchannel = gigaset_init_bchannel,
45012+ .close_bchannel = gigaset_close_bchannel,
45013+ .initbcshw = gigaset_initbcshw,
45014+ .freebcshw = gigaset_freebcshw,
45015+ .reinitbcshw = gigaset_reinitbcshw,
45016+ .initcshw = gigaset_initcshw,
45017+ .freecshw = gigaset_freecshw,
45018+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45019+ .baud_rate = gigaset_baud_rate,
45020+ .set_line_ctrl = gigaset_set_line_ctrl,
45021+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
45022+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
45023 };
45024
45025
45026diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
45027index 5f306e2..5342f88 100644
45028--- a/drivers/isdn/gigaset/usb-gigaset.c
45029+++ b/drivers/isdn/gigaset/usb-gigaset.c
45030@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
45031 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
45032 memcpy(cs->hw.usb->bchars, buf, 6);
45033 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
45034- 0, 0, &buf, 6, 2000);
45035+ 0, 0, buf, 6, 2000);
45036 }
45037
45038 static void gigaset_freebcshw(struct bc_state *bcs)
45039@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
45040 }
45041
45042 static const struct gigaset_ops ops = {
45043- gigaset_write_cmd,
45044- gigaset_write_room,
45045- gigaset_chars_in_buffer,
45046- gigaset_brkchars,
45047- gigaset_init_bchannel,
45048- gigaset_close_bchannel,
45049- gigaset_initbcshw,
45050- gigaset_freebcshw,
45051- gigaset_reinitbcshw,
45052- gigaset_initcshw,
45053- gigaset_freecshw,
45054- gigaset_set_modem_ctrl,
45055- gigaset_baud_rate,
45056- gigaset_set_line_ctrl,
45057- gigaset_m10x_send_skb,
45058- gigaset_m10x_input,
45059+ .write_cmd = gigaset_write_cmd,
45060+ .write_room = gigaset_write_room,
45061+ .chars_in_buffer = gigaset_chars_in_buffer,
45062+ .brkchars = gigaset_brkchars,
45063+ .init_bchannel = gigaset_init_bchannel,
45064+ .close_bchannel = gigaset_close_bchannel,
45065+ .initbcshw = gigaset_initbcshw,
45066+ .freebcshw = gigaset_freebcshw,
45067+ .reinitbcshw = gigaset_reinitbcshw,
45068+ .initcshw = gigaset_initcshw,
45069+ .freecshw = gigaset_freecshw,
45070+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45071+ .baud_rate = gigaset_baud_rate,
45072+ .set_line_ctrl = gigaset_set_line_ctrl,
45073+ .send_skb = gigaset_m10x_send_skb,
45074+ .handle_input = gigaset_m10x_input,
45075 };
45076
45077 /*
45078diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
45079index 4d9b195..455075c 100644
45080--- a/drivers/isdn/hardware/avm/b1.c
45081+++ b/drivers/isdn/hardware/avm/b1.c
45082@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
45083 }
45084 if (left) {
45085 if (t4file->user) {
45086- if (copy_from_user(buf, dp, left))
45087+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45088 return -EFAULT;
45089 } else {
45090 memcpy(buf, dp, left);
45091@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
45092 }
45093 if (left) {
45094 if (config->user) {
45095- if (copy_from_user(buf, dp, left))
45096+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45097 return -EFAULT;
45098 } else {
45099 memcpy(buf, dp, left);
45100diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
45101index 9b856e1..fa03c92 100644
45102--- a/drivers/isdn/i4l/isdn_common.c
45103+++ b/drivers/isdn/i4l/isdn_common.c
45104@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
45105 } else
45106 return -EINVAL;
45107 case IIOCDBGVAR:
45108+ if (!capable(CAP_SYS_RAWIO))
45109+ return -EPERM;
45110 if (arg) {
45111 if (copy_to_user(argp, &dev, sizeof(ulong)))
45112 return -EFAULT;
45113diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
45114index 91d5730..336523e 100644
45115--- a/drivers/isdn/i4l/isdn_concap.c
45116+++ b/drivers/isdn/i4l/isdn_concap.c
45117@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
45118 }
45119
45120 struct concap_device_ops isdn_concap_reliable_dl_dops = {
45121- &isdn_concap_dl_data_req,
45122- &isdn_concap_dl_connect_req,
45123- &isdn_concap_dl_disconn_req
45124+ .data_req = &isdn_concap_dl_data_req,
45125+ .connect_req = &isdn_concap_dl_connect_req,
45126+ .disconn_req = &isdn_concap_dl_disconn_req
45127 };
45128
45129 /* The following should better go into a dedicated source file such that
45130diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
45131index bc91261..2ef7e36 100644
45132--- a/drivers/isdn/i4l/isdn_tty.c
45133+++ b/drivers/isdn/i4l/isdn_tty.c
45134@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
45135
45136 #ifdef ISDN_DEBUG_MODEM_OPEN
45137 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
45138- port->count);
45139+ atomic_read(&port->count));
45140 #endif
45141- port->count++;
45142+ atomic_inc(&port->count);
45143 port->tty = tty;
45144 /*
45145 * Start up serial port
45146@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45147 #endif
45148 return;
45149 }
45150- if ((tty->count == 1) && (port->count != 1)) {
45151+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
45152 /*
45153 * Uh, oh. tty->count is 1, which means that the tty
45154 * structure will be freed. Info->count should always
45155@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45156 * serial port won't be shutdown.
45157 */
45158 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
45159- "info->count is %d\n", port->count);
45160- port->count = 1;
45161+ "info->count is %d\n", atomic_read(&port->count));
45162+ atomic_set(&port->count, 1);
45163 }
45164- if (--port->count < 0) {
45165+ if (atomic_dec_return(&port->count) < 0) {
45166 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
45167- info->line, port->count);
45168- port->count = 0;
45169+ info->line, atomic_read(&port->count));
45170+ atomic_set(&port->count, 0);
45171 }
45172- if (port->count) {
45173+ if (atomic_read(&port->count)) {
45174 #ifdef ISDN_DEBUG_MODEM_OPEN
45175 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
45176 #endif
45177@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
45178 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
45179 return;
45180 isdn_tty_shutdown(info);
45181- port->count = 0;
45182+ atomic_set(&port->count, 0);
45183 port->flags &= ~ASYNC_NORMAL_ACTIVE;
45184 port->tty = NULL;
45185 wake_up_interruptible(&port->open_wait);
45186@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
45187 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
45188 modem_info *info = &dev->mdm.info[i];
45189
45190- if (info->port.count == 0)
45191+ if (atomic_read(&info->port.count) == 0)
45192 continue;
45193 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
45194 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
45195diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
45196index e2d4e58..40cd045 100644
45197--- a/drivers/isdn/i4l/isdn_x25iface.c
45198+++ b/drivers/isdn/i4l/isdn_x25iface.c
45199@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
45200
45201
45202 static struct concap_proto_ops ix25_pops = {
45203- &isdn_x25iface_proto_new,
45204- &isdn_x25iface_proto_del,
45205- &isdn_x25iface_proto_restart,
45206- &isdn_x25iface_proto_close,
45207- &isdn_x25iface_xmit,
45208- &isdn_x25iface_receive,
45209- &isdn_x25iface_connect_ind,
45210- &isdn_x25iface_disconn_ind
45211+ .proto_new = &isdn_x25iface_proto_new,
45212+ .proto_del = &isdn_x25iface_proto_del,
45213+ .restart = &isdn_x25iface_proto_restart,
45214+ .close = &isdn_x25iface_proto_close,
45215+ .encap_and_xmit = &isdn_x25iface_xmit,
45216+ .data_ind = &isdn_x25iface_receive,
45217+ .connect_ind = &isdn_x25iface_connect_ind,
45218+ .disconn_ind = &isdn_x25iface_disconn_ind
45219 };
45220
45221 /* error message helper function */
45222diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45223index 358a574..b4987ea 100644
45224--- a/drivers/isdn/icn/icn.c
45225+++ b/drivers/isdn/icn/icn.c
45226@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45227 if (count > len)
45228 count = len;
45229 if (user) {
45230- if (copy_from_user(msg, buf, count))
45231+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45232 return -EFAULT;
45233 } else
45234 memcpy(msg, buf, count);
45235diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45236index 87f7dff..7300125 100644
45237--- a/drivers/isdn/mISDN/dsp_cmx.c
45238+++ b/drivers/isdn/mISDN/dsp_cmx.c
45239@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45240 static u16 dsp_count; /* last sample count */
45241 static int dsp_count_valid; /* if we have last sample count */
45242
45243-void
45244+void __intentional_overflow(-1)
45245 dsp_cmx_send(void *arg)
45246 {
45247 struct dsp_conf *conf;
45248diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45249index 7dc93aa..9263d05 100644
45250--- a/drivers/lguest/core.c
45251+++ b/drivers/lguest/core.c
45252@@ -96,9 +96,17 @@ static __init int map_switcher(void)
45253 * The end address needs +1 because __get_vm_area allocates an
45254 * extra guard page, so we need space for that.
45255 */
45256+
45257+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45258+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45259+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45260+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45261+#else
45262 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45263 VM_ALLOC, switcher_addr, switcher_addr
45264 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45265+#endif
45266+
45267 if (!switcher_vma) {
45268 err = -ENOMEM;
45269 printk("lguest: could not map switcher pages high\n");
45270@@ -121,7 +129,7 @@ static __init int map_switcher(void)
45271 * Now the Switcher is mapped at the right address, we can't fail!
45272 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45273 */
45274- memcpy(switcher_vma->addr, start_switcher_text,
45275+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45276 end_switcher_text - start_switcher_text);
45277
45278 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45279@@ -173,7 +181,7 @@ static void unmap_switcher(void)
45280 bool lguest_address_ok(const struct lguest *lg,
45281 unsigned long addr, unsigned long len)
45282 {
45283- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
45284+ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
45285 }
45286
45287 /*
45288diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45289index e3abebc9..6a35328 100644
45290--- a/drivers/lguest/page_tables.c
45291+++ b/drivers/lguest/page_tables.c
45292@@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45293 /*:*/
45294
45295 #ifdef CONFIG_X86_PAE
45296-static void release_pmd(pmd_t *spmd)
45297+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45298 {
45299 /* If the entry's not present, there's nothing to release. */
45300 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45301diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45302index 30f2aef..391c748 100644
45303--- a/drivers/lguest/x86/core.c
45304+++ b/drivers/lguest/x86/core.c
45305@@ -60,7 +60,7 @@ static struct {
45306 /* Offset from where switcher.S was compiled to where we've copied it */
45307 static unsigned long switcher_offset(void)
45308 {
45309- return switcher_addr - (unsigned long)start_switcher_text;
45310+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45311 }
45312
45313 /* This cpu's struct lguest_pages (after the Switcher text page) */
45314@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45315 * These copies are pretty cheap, so we do them unconditionally: */
45316 /* Save the current Host top-level page directory.
45317 */
45318+
45319+#ifdef CONFIG_PAX_PER_CPU_PGD
45320+ pages->state.host_cr3 = read_cr3();
45321+#else
45322 pages->state.host_cr3 = __pa(current->mm->pgd);
45323+#endif
45324+
45325 /*
45326 * Set up the Guest's page tables to see this CPU's pages (and no
45327 * other CPU's pages).
45328@@ -494,7 +500,7 @@ void __init lguest_arch_host_init(void)
45329 * compiled-in switcher code and the high-mapped copy we just made.
45330 */
45331 for (i = 0; i < IDT_ENTRIES; i++)
45332- default_idt_entries[i] += switcher_offset();
45333+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45334
45335 /*
45336 * Set up the Switcher's per-cpu areas.
45337@@ -577,7 +583,7 @@ void __init lguest_arch_host_init(void)
45338 * it will be undisturbed when we switch. To change %cs and jump we
45339 * need this structure to feed to Intel's "lcall" instruction.
45340 */
45341- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45342+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45343 lguest_entry.segment = LGUEST_CS;
45344
45345 /*
45346diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45347index 40634b0..4f5855e 100644
45348--- a/drivers/lguest/x86/switcher_32.S
45349+++ b/drivers/lguest/x86/switcher_32.S
45350@@ -87,6 +87,7 @@
45351 #include <asm/page.h>
45352 #include <asm/segment.h>
45353 #include <asm/lguest.h>
45354+#include <asm/processor-flags.h>
45355
45356 // We mark the start of the code to copy
45357 // It's placed in .text tho it's never run here
45358@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45359 // Changes type when we load it: damn Intel!
45360 // For after we switch over our page tables
45361 // That entry will be read-only: we'd crash.
45362+
45363+#ifdef CONFIG_PAX_KERNEXEC
45364+ mov %cr0, %edx
45365+ xor $X86_CR0_WP, %edx
45366+ mov %edx, %cr0
45367+#endif
45368+
45369 movl $(GDT_ENTRY_TSS*8), %edx
45370 ltr %dx
45371
45372@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45373 // Let's clear it again for our return.
45374 // The GDT descriptor of the Host
45375 // Points to the table after two "size" bytes
45376- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45377+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45378 // Clear "used" from type field (byte 5, bit 2)
45379- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45380+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45381+
45382+#ifdef CONFIG_PAX_KERNEXEC
45383+ mov %cr0, %eax
45384+ xor $X86_CR0_WP, %eax
45385+ mov %eax, %cr0
45386+#endif
45387
45388 // Once our page table's switched, the Guest is live!
45389 // The Host fades as we run this final step.
45390@@ -295,13 +309,12 @@ deliver_to_host:
45391 // I consulted gcc, and it gave
45392 // These instructions, which I gladly credit:
45393 leal (%edx,%ebx,8), %eax
45394- movzwl (%eax),%edx
45395- movl 4(%eax), %eax
45396- xorw %ax, %ax
45397- orl %eax, %edx
45398+ movl 4(%eax), %edx
45399+ movw (%eax), %dx
45400 // Now the address of the handler's in %edx
45401 // We call it now: its "iret" drops us home.
45402- jmp *%edx
45403+ ljmp $__KERNEL_CS, $1f
45404+1: jmp *%edx
45405
45406 // Every interrupt can come to us here
45407 // But we must truly tell each apart.
45408diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45409index a08e3ee..df8ade2 100644
45410--- a/drivers/md/bcache/closure.h
45411+++ b/drivers/md/bcache/closure.h
45412@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45413 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45414 struct workqueue_struct *wq)
45415 {
45416- BUG_ON(object_is_on_stack(cl));
45417+ BUG_ON(object_starts_on_stack(cl));
45418 closure_set_ip(cl);
45419 cl->fn = fn;
45420 cl->wq = wq;
45421diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45422index 3a57679..c58cdaf 100644
45423--- a/drivers/md/bitmap.c
45424+++ b/drivers/md/bitmap.c
45425@@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45426 chunk_kb ? "KB" : "B");
45427 if (bitmap->storage.file) {
45428 seq_printf(seq, ", file: ");
45429- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45430+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45431 }
45432
45433 seq_printf(seq, "\n");
45434diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45435index c8a18e4..0ab43e5 100644
45436--- a/drivers/md/dm-ioctl.c
45437+++ b/drivers/md/dm-ioctl.c
45438@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45439 cmd == DM_LIST_VERSIONS_CMD)
45440 return 0;
45441
45442- if ((cmd == DM_DEV_CREATE_CMD)) {
45443+ if (cmd == DM_DEV_CREATE_CMD) {
45444 if (!*param->name) {
45445 DMWARN("name not supplied when creating device");
45446 return -EINVAL;
45447diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45448index 089d627..ef7352e 100644
45449--- a/drivers/md/dm-raid1.c
45450+++ b/drivers/md/dm-raid1.c
45451@@ -40,7 +40,7 @@ enum dm_raid1_error {
45452
45453 struct mirror {
45454 struct mirror_set *ms;
45455- atomic_t error_count;
45456+ atomic_unchecked_t error_count;
45457 unsigned long error_type;
45458 struct dm_dev *dev;
45459 sector_t offset;
45460@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45461 struct mirror *m;
45462
45463 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45464- if (!atomic_read(&m->error_count))
45465+ if (!atomic_read_unchecked(&m->error_count))
45466 return m;
45467
45468 return NULL;
45469@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45470 * simple way to tell if a device has encountered
45471 * errors.
45472 */
45473- atomic_inc(&m->error_count);
45474+ atomic_inc_unchecked(&m->error_count);
45475
45476 if (test_and_set_bit(error_type, &m->error_type))
45477 return;
45478@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45479 struct mirror *m = get_default_mirror(ms);
45480
45481 do {
45482- if (likely(!atomic_read(&m->error_count)))
45483+ if (likely(!atomic_read_unchecked(&m->error_count)))
45484 return m;
45485
45486 if (m-- == ms->mirror)
45487@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45488 {
45489 struct mirror *default_mirror = get_default_mirror(m->ms);
45490
45491- return !atomic_read(&default_mirror->error_count);
45492+ return !atomic_read_unchecked(&default_mirror->error_count);
45493 }
45494
45495 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45496@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45497 */
45498 if (likely(region_in_sync(ms, region, 1)))
45499 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45500- else if (m && atomic_read(&m->error_count))
45501+ else if (m && atomic_read_unchecked(&m->error_count))
45502 m = NULL;
45503
45504 if (likely(m))
45505@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45506 }
45507
45508 ms->mirror[mirror].ms = ms;
45509- atomic_set(&(ms->mirror[mirror].error_count), 0);
45510+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45511 ms->mirror[mirror].error_type = 0;
45512 ms->mirror[mirror].offset = offset;
45513
45514@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
45515 */
45516 static char device_status_char(struct mirror *m)
45517 {
45518- if (!atomic_read(&(m->error_count)))
45519+ if (!atomic_read_unchecked(&(m->error_count)))
45520 return 'A';
45521
45522 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45523diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45524index f478a4c..4b8e5ef 100644
45525--- a/drivers/md/dm-stats.c
45526+++ b/drivers/md/dm-stats.c
45527@@ -382,7 +382,7 @@ do_sync_free:
45528 synchronize_rcu_expedited();
45529 dm_stat_free(&s->rcu_head);
45530 } else {
45531- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45532+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45533 call_rcu(&s->rcu_head, dm_stat_free);
45534 }
45535 return 0;
45536@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45537 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45538 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45539 ));
45540- ACCESS_ONCE(last->last_sector) = end_sector;
45541- ACCESS_ONCE(last->last_rw) = bi_rw;
45542+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45543+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45544 }
45545
45546 rcu_read_lock();
45547diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45548index f8b37d4..5c5cafd 100644
45549--- a/drivers/md/dm-stripe.c
45550+++ b/drivers/md/dm-stripe.c
45551@@ -21,7 +21,7 @@ struct stripe {
45552 struct dm_dev *dev;
45553 sector_t physical_start;
45554
45555- atomic_t error_count;
45556+ atomic_unchecked_t error_count;
45557 };
45558
45559 struct stripe_c {
45560@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45561 kfree(sc);
45562 return r;
45563 }
45564- atomic_set(&(sc->stripe[i].error_count), 0);
45565+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45566 }
45567
45568 ti->private = sc;
45569@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45570 DMEMIT("%d ", sc->stripes);
45571 for (i = 0; i < sc->stripes; i++) {
45572 DMEMIT("%s ", sc->stripe[i].dev->name);
45573- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45574+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45575 'D' : 'A';
45576 }
45577 buffer[i] = '\0';
45578@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45579 */
45580 for (i = 0; i < sc->stripes; i++)
45581 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45582- atomic_inc(&(sc->stripe[i].error_count));
45583- if (atomic_read(&(sc->stripe[i].error_count)) <
45584+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45585+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45586 DM_IO_ERROR_THRESHOLD)
45587 schedule_work(&sc->trigger_event);
45588 }
45589diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45590index 6554d91..b0221c2 100644
45591--- a/drivers/md/dm-table.c
45592+++ b/drivers/md/dm-table.c
45593@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45594 if (!dev_size)
45595 return 0;
45596
45597- if ((start >= dev_size) || (start + len > dev_size)) {
45598+ if ((start >= dev_size) || (len > dev_size - start)) {
45599 DMWARN("%s: %s too small for target: "
45600 "start=%llu, len=%llu, dev_size=%llu",
45601 dm_device_name(ti->table->md), bdevname(bdev, b),
45602diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45603index 79f6941..b33b4e0 100644
45604--- a/drivers/md/dm-thin-metadata.c
45605+++ b/drivers/md/dm-thin-metadata.c
45606@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45607 {
45608 pmd->info.tm = pmd->tm;
45609 pmd->info.levels = 2;
45610- pmd->info.value_type.context = pmd->data_sm;
45611+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45612 pmd->info.value_type.size = sizeof(__le64);
45613 pmd->info.value_type.inc = data_block_inc;
45614 pmd->info.value_type.dec = data_block_dec;
45615@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45616
45617 pmd->bl_info.tm = pmd->tm;
45618 pmd->bl_info.levels = 1;
45619- pmd->bl_info.value_type.context = pmd->data_sm;
45620+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45621 pmd->bl_info.value_type.size = sizeof(__le64);
45622 pmd->bl_info.value_type.inc = data_block_inc;
45623 pmd->bl_info.value_type.dec = data_block_dec;
45624diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45625index 8001fe9..83c927d 100644
45626--- a/drivers/md/dm.c
45627+++ b/drivers/md/dm.c
45628@@ -188,9 +188,9 @@ struct mapped_device {
45629 /*
45630 * Event handling.
45631 */
45632- atomic_t event_nr;
45633+ atomic_unchecked_t event_nr;
45634 wait_queue_head_t eventq;
45635- atomic_t uevent_seq;
45636+ atomic_unchecked_t uevent_seq;
45637 struct list_head uevent_list;
45638 spinlock_t uevent_lock; /* Protect access to uevent_list */
45639
45640@@ -1642,8 +1642,7 @@ static int dm_merge_bvec(struct request_queue *q,
45641 struct mapped_device *md = q->queuedata;
45642 struct dm_table *map = dm_get_live_table_fast(md);
45643 struct dm_target *ti;
45644- sector_t max_sectors;
45645- int max_size = 0;
45646+ sector_t max_sectors, max_size = 0;
45647
45648 if (unlikely(!map))
45649 goto out;
45650@@ -1658,8 +1657,16 @@ static int dm_merge_bvec(struct request_queue *q,
45651 max_sectors = min(max_io_len(bvm->bi_sector, ti),
45652 (sector_t) queue_max_sectors(q));
45653 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
45654- if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
45655- max_size = 0;
45656+
45657+ /*
45658+ * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
45659+ * to the targets' merge function since it holds sectors not bytes).
45660+ * Just doing this as an interim fix for stable@ because the more
45661+ * comprehensive cleanup of switching to sector_t will impact every
45662+ * DM target that implements a ->merge hook.
45663+ */
45664+ if (max_size > INT_MAX)
45665+ max_size = INT_MAX;
45666
45667 /*
45668 * merge_bvec_fn() returns number of bytes
45669@@ -1667,7 +1674,7 @@ static int dm_merge_bvec(struct request_queue *q,
45670 * max is precomputed maximal io size
45671 */
45672 if (max_size && ti->type->merge)
45673- max_size = ti->type->merge(ti, bvm, biovec, max_size);
45674+ max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
45675 /*
45676 * If the target doesn't support merge method and some of the devices
45677 * provided their merge_bvec method (we know this by looking for the
45678@@ -2163,8 +2170,8 @@ static struct mapped_device *alloc_dev(int minor)
45679 spin_lock_init(&md->deferred_lock);
45680 atomic_set(&md->holders, 1);
45681 atomic_set(&md->open_count, 0);
45682- atomic_set(&md->event_nr, 0);
45683- atomic_set(&md->uevent_seq, 0);
45684+ atomic_set_unchecked(&md->event_nr, 0);
45685+ atomic_set_unchecked(&md->uevent_seq, 0);
45686 INIT_LIST_HEAD(&md->uevent_list);
45687 INIT_LIST_HEAD(&md->table_devices);
45688 spin_lock_init(&md->uevent_lock);
45689@@ -2329,7 +2336,7 @@ static void event_callback(void *context)
45690
45691 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
45692
45693- atomic_inc(&md->event_nr);
45694+ atomic_inc_unchecked(&md->event_nr);
45695 wake_up(&md->eventq);
45696 }
45697
45698@@ -3175,18 +3182,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
45699
45700 uint32_t dm_next_uevent_seq(struct mapped_device *md)
45701 {
45702- return atomic_add_return(1, &md->uevent_seq);
45703+ return atomic_add_return_unchecked(1, &md->uevent_seq);
45704 }
45705
45706 uint32_t dm_get_event_nr(struct mapped_device *md)
45707 {
45708- return atomic_read(&md->event_nr);
45709+ return atomic_read_unchecked(&md->event_nr);
45710 }
45711
45712 int dm_wait_event(struct mapped_device *md, int event_nr)
45713 {
45714 return wait_event_interruptible(md->eventq,
45715- (event_nr != atomic_read(&md->event_nr)));
45716+ (event_nr != atomic_read_unchecked(&md->event_nr)));
45717 }
45718
45719 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45720diff --git a/drivers/md/md.c b/drivers/md/md.c
45721index e47d1dd..ebc3480 100644
45722--- a/drivers/md/md.c
45723+++ b/drivers/md/md.c
45724@@ -191,10 +191,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45725 * start build, activate spare
45726 */
45727 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45728-static atomic_t md_event_count;
45729+static atomic_unchecked_t md_event_count;
45730 void md_new_event(struct mddev *mddev)
45731 {
45732- atomic_inc(&md_event_count);
45733+ atomic_inc_unchecked(&md_event_count);
45734 wake_up(&md_event_waiters);
45735 }
45736 EXPORT_SYMBOL_GPL(md_new_event);
45737@@ -204,7 +204,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45738 */
45739 static void md_new_event_inintr(struct mddev *mddev)
45740 {
45741- atomic_inc(&md_event_count);
45742+ atomic_inc_unchecked(&md_event_count);
45743 wake_up(&md_event_waiters);
45744 }
45745
45746@@ -1442,7 +1442,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45747 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45748 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45749 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45750- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45751+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45752
45753 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45754 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45755@@ -1693,7 +1693,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45756 else
45757 sb->resync_offset = cpu_to_le64(0);
45758
45759- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45760+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45761
45762 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45763 sb->size = cpu_to_le64(mddev->dev_sectors);
45764@@ -2564,7 +2564,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
45765 static ssize_t
45766 errors_show(struct md_rdev *rdev, char *page)
45767 {
45768- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45769+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45770 }
45771
45772 static ssize_t
45773@@ -2573,7 +2573,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45774 char *e;
45775 unsigned long n = simple_strtoul(buf, &e, 10);
45776 if (*buf && (*e == 0 || *e == '\n')) {
45777- atomic_set(&rdev->corrected_errors, n);
45778+ atomic_set_unchecked(&rdev->corrected_errors, n);
45779 return len;
45780 }
45781 return -EINVAL;
45782@@ -3009,8 +3009,8 @@ int md_rdev_init(struct md_rdev *rdev)
45783 rdev->sb_loaded = 0;
45784 rdev->bb_page = NULL;
45785 atomic_set(&rdev->nr_pending, 0);
45786- atomic_set(&rdev->read_errors, 0);
45787- atomic_set(&rdev->corrected_errors, 0);
45788+ atomic_set_unchecked(&rdev->read_errors, 0);
45789+ atomic_set_unchecked(&rdev->corrected_errors, 0);
45790
45791 INIT_LIST_HEAD(&rdev->same_set);
45792 init_waitqueue_head(&rdev->blocked_wait);
45793@@ -7083,7 +7083,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45794
45795 spin_unlock(&pers_lock);
45796 seq_printf(seq, "\n");
45797- seq->poll_event = atomic_read(&md_event_count);
45798+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45799 return 0;
45800 }
45801 if (v == (void*)2) {
45802@@ -7186,7 +7186,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45803 return error;
45804
45805 seq = file->private_data;
45806- seq->poll_event = atomic_read(&md_event_count);
45807+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45808 return error;
45809 }
45810
45811@@ -7203,7 +7203,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45812 /* always allow read */
45813 mask = POLLIN | POLLRDNORM;
45814
45815- if (seq->poll_event != atomic_read(&md_event_count))
45816+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45817 mask |= POLLERR | POLLPRI;
45818 return mask;
45819 }
45820@@ -7250,7 +7250,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45821 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45822 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45823 (int)part_stat_read(&disk->part0, sectors[1]) -
45824- atomic_read(&disk->sync_io);
45825+ atomic_read_unchecked(&disk->sync_io);
45826 /* sync IO will cause sync_io to increase before the disk_stats
45827 * as sync_io is counted when a request starts, and
45828 * disk_stats is counted when it completes.
45829diff --git a/drivers/md/md.h b/drivers/md/md.h
45830index 318ca8f..31e4478 100644
45831--- a/drivers/md/md.h
45832+++ b/drivers/md/md.h
45833@@ -94,13 +94,13 @@ struct md_rdev {
45834 * only maintained for arrays that
45835 * support hot removal
45836 */
45837- atomic_t read_errors; /* number of consecutive read errors that
45838+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
45839 * we have tried to ignore.
45840 */
45841 struct timespec last_read_error; /* monotonic time since our
45842 * last read error
45843 */
45844- atomic_t corrected_errors; /* number of corrected read errors,
45845+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45846 * for reporting to userspace and storing
45847 * in superblock.
45848 */
45849@@ -476,7 +476,7 @@ extern void mddev_unlock(struct mddev *mddev);
45850
45851 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45852 {
45853- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45854+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45855 }
45856
45857 struct md_personality
45858diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45859index e8a9042..35bd145 100644
45860--- a/drivers/md/persistent-data/dm-space-map-metadata.c
45861+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45862@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45863 * Flick into a mode where all blocks get allocated in the new area.
45864 */
45865 smm->begin = old_len;
45866- memcpy(sm, &bootstrap_ops, sizeof(*sm));
45867+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45868
45869 /*
45870 * Extend.
45871@@ -714,7 +714,7 @@ out:
45872 /*
45873 * Switch back to normal behaviour.
45874 */
45875- memcpy(sm, &ops, sizeof(*sm));
45876+ memcpy((void *)sm, &ops, sizeof(*sm));
45877 return r;
45878 }
45879
45880diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45881index 3e6d115..ffecdeb 100644
45882--- a/drivers/md/persistent-data/dm-space-map.h
45883+++ b/drivers/md/persistent-data/dm-space-map.h
45884@@ -71,6 +71,7 @@ struct dm_space_map {
45885 dm_sm_threshold_fn fn,
45886 void *context);
45887 };
45888+typedef struct dm_space_map __no_const dm_space_map_no_const;
45889
45890 /*----------------------------------------------------------------*/
45891
45892diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
45893index 3b5d7f7..903391c 100644
45894--- a/drivers/md/raid0.c
45895+++ b/drivers/md/raid0.c
45896@@ -517,6 +517,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
45897 ? (sector & (chunk_sects-1))
45898 : sector_div(sector, chunk_sects));
45899
45900+ /* Restore due to sector_div */
45901+ sector = bio->bi_iter.bi_sector;
45902+
45903 if (sectors < bio_sectors(bio)) {
45904 split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
45905 bio_chain(split, bio);
45906@@ -524,7 +527,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
45907 split = bio;
45908 }
45909
45910- sector = bio->bi_iter.bi_sector;
45911 zone = find_zone(mddev->private, &sector);
45912 tmp_dev = map_sector(mddev, zone, sector, &sector);
45913 split->bi_bdev = tmp_dev->bdev;
45914diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45915index d34e238..34f8d98 100644
45916--- a/drivers/md/raid1.c
45917+++ b/drivers/md/raid1.c
45918@@ -1922,7 +1922,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45919 if (r1_sync_page_io(rdev, sect, s,
45920 bio->bi_io_vec[idx].bv_page,
45921 READ) != 0)
45922- atomic_add(s, &rdev->corrected_errors);
45923+ atomic_add_unchecked(s, &rdev->corrected_errors);
45924 }
45925 sectors -= s;
45926 sect += s;
45927@@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45928 !test_bit(Faulty, &rdev->flags)) {
45929 if (r1_sync_page_io(rdev, sect, s,
45930 conf->tmppage, READ)) {
45931- atomic_add(s, &rdev->corrected_errors);
45932+ atomic_add_unchecked(s, &rdev->corrected_errors);
45933 printk(KERN_INFO
45934 "md/raid1:%s: read error corrected "
45935 "(%d sectors at %llu on %s)\n",
45936diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45937index a7196c4..439f012 100644
45938--- a/drivers/md/raid10.c
45939+++ b/drivers/md/raid10.c
45940@@ -1934,7 +1934,7 @@ static void end_sync_read(struct bio *bio, int error)
45941 /* The write handler will notice the lack of
45942 * R10BIO_Uptodate and record any errors etc
45943 */
45944- atomic_add(r10_bio->sectors,
45945+ atomic_add_unchecked(r10_bio->sectors,
45946 &conf->mirrors[d].rdev->corrected_errors);
45947
45948 /* for reconstruct, we always reschedule after a read.
45949@@ -2291,7 +2291,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45950 {
45951 struct timespec cur_time_mon;
45952 unsigned long hours_since_last;
45953- unsigned int read_errors = atomic_read(&rdev->read_errors);
45954+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45955
45956 ktime_get_ts(&cur_time_mon);
45957
45958@@ -2313,9 +2313,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45959 * overflowing the shift of read_errors by hours_since_last.
45960 */
45961 if (hours_since_last >= 8 * sizeof(read_errors))
45962- atomic_set(&rdev->read_errors, 0);
45963+ atomic_set_unchecked(&rdev->read_errors, 0);
45964 else
45965- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45966+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45967 }
45968
45969 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45970@@ -2369,8 +2369,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45971 return;
45972
45973 check_decay_read_errors(mddev, rdev);
45974- atomic_inc(&rdev->read_errors);
45975- if (atomic_read(&rdev->read_errors) > max_read_errors) {
45976+ atomic_inc_unchecked(&rdev->read_errors);
45977+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45978 char b[BDEVNAME_SIZE];
45979 bdevname(rdev->bdev, b);
45980
45981@@ -2378,7 +2378,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45982 "md/raid10:%s: %s: Raid device exceeded "
45983 "read_error threshold [cur %d:max %d]\n",
45984 mdname(mddev), b,
45985- atomic_read(&rdev->read_errors), max_read_errors);
45986+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
45987 printk(KERN_NOTICE
45988 "md/raid10:%s: %s: Failing raid device\n",
45989 mdname(mddev), b);
45990@@ -2533,7 +2533,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45991 sect +
45992 choose_data_offset(r10_bio, rdev)),
45993 bdevname(rdev->bdev, b));
45994- atomic_add(s, &rdev->corrected_errors);
45995+ atomic_add_unchecked(s, &rdev->corrected_errors);
45996 }
45997
45998 rdev_dec_pending(rdev, mddev);
45999diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
46000index cd2f96b..3876e63 100644
46001--- a/drivers/md/raid5.c
46002+++ b/drivers/md/raid5.c
46003@@ -947,23 +947,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
46004 struct bio_vec bvl;
46005 struct bvec_iter iter;
46006 struct page *bio_page;
46007- int page_offset;
46008+ s64 page_offset;
46009 struct async_submit_ctl submit;
46010 enum async_tx_flags flags = 0;
46011
46012 if (bio->bi_iter.bi_sector >= sector)
46013- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
46014+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
46015 else
46016- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
46017+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
46018
46019 if (frombio)
46020 flags |= ASYNC_TX_FENCE;
46021 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
46022
46023 bio_for_each_segment(bvl, bio, iter) {
46024- int len = bvl.bv_len;
46025- int clen;
46026- int b_offset = 0;
46027+ s64 len = bvl.bv_len;
46028+ s64 clen;
46029+ s64 b_offset = 0;
46030
46031 if (page_offset < 0) {
46032 b_offset = -page_offset;
46033@@ -1727,6 +1727,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
46034 return 1;
46035 }
46036
46037+#ifdef CONFIG_GRKERNSEC_HIDESYM
46038+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
46039+#endif
46040+
46041 static int grow_stripes(struct r5conf *conf, int num)
46042 {
46043 struct kmem_cache *sc;
46044@@ -1738,7 +1742,11 @@ static int grow_stripes(struct r5conf *conf, int num)
46045 "raid%d-%s", conf->level, mdname(conf->mddev));
46046 else
46047 sprintf(conf->cache_name[0],
46048+#ifdef CONFIG_GRKERNSEC_HIDESYM
46049+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
46050+#else
46051 "raid%d-%p", conf->level, conf->mddev);
46052+#endif
46053 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
46054
46055 conf->active_name = 0;
46056@@ -2014,21 +2022,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
46057 mdname(conf->mddev), STRIPE_SECTORS,
46058 (unsigned long long)s,
46059 bdevname(rdev->bdev, b));
46060- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
46061+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
46062 clear_bit(R5_ReadError, &sh->dev[i].flags);
46063 clear_bit(R5_ReWrite, &sh->dev[i].flags);
46064 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
46065 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
46066
46067- if (atomic_read(&rdev->read_errors))
46068- atomic_set(&rdev->read_errors, 0);
46069+ if (atomic_read_unchecked(&rdev->read_errors))
46070+ atomic_set_unchecked(&rdev->read_errors, 0);
46071 } else {
46072 const char *bdn = bdevname(rdev->bdev, b);
46073 int retry = 0;
46074 int set_bad = 0;
46075
46076 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
46077- atomic_inc(&rdev->read_errors);
46078+ atomic_inc_unchecked(&rdev->read_errors);
46079 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
46080 printk_ratelimited(
46081 KERN_WARNING
46082@@ -2056,7 +2064,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
46083 mdname(conf->mddev),
46084 (unsigned long long)s,
46085 bdn);
46086- } else if (atomic_read(&rdev->read_errors)
46087+ } else if (atomic_read_unchecked(&rdev->read_errors)
46088 > conf->max_nr_stripes)
46089 printk(KERN_WARNING
46090 "md/raid:%s: Too many read errors, failing device %s.\n",
46091diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
46092index 983db75..ef9248c 100644
46093--- a/drivers/media/dvb-core/dvbdev.c
46094+++ b/drivers/media/dvb-core/dvbdev.c
46095@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
46096 const struct dvb_device *template, void *priv, int type)
46097 {
46098 struct dvb_device *dvbdev;
46099- struct file_operations *dvbdevfops;
46100+ file_operations_no_const *dvbdevfops;
46101 struct device *clsdev;
46102 int minor;
46103 int id;
46104diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
46105index 6ad22b6..6e90e2a 100644
46106--- a/drivers/media/dvb-frontends/af9033.h
46107+++ b/drivers/media/dvb-frontends/af9033.h
46108@@ -96,6 +96,6 @@ struct af9033_ops {
46109 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
46110 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
46111 int onoff);
46112-};
46113+} __no_const;
46114
46115 #endif /* AF9033_H */
46116diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
46117index 9b6c3bb..baeb5c7 100644
46118--- a/drivers/media/dvb-frontends/dib3000.h
46119+++ b/drivers/media/dvb-frontends/dib3000.h
46120@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
46121 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
46122 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
46123 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
46124-};
46125+} __no_const;
46126
46127 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
46128 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
46129diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
46130index 1fea0e9..321ce8f 100644
46131--- a/drivers/media/dvb-frontends/dib7000p.h
46132+++ b/drivers/media/dvb-frontends/dib7000p.h
46133@@ -64,7 +64,7 @@ struct dib7000p_ops {
46134 int (*get_adc_power)(struct dvb_frontend *fe);
46135 int (*slave_reset)(struct dvb_frontend *fe);
46136 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
46137-};
46138+} __no_const;
46139
46140 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
46141 void *dib7000p_attach(struct dib7000p_ops *ops);
46142diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
46143index 84cc103..5780c54 100644
46144--- a/drivers/media/dvb-frontends/dib8000.h
46145+++ b/drivers/media/dvb-frontends/dib8000.h
46146@@ -61,7 +61,7 @@ struct dib8000_ops {
46147 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
46148 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
46149 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
46150-};
46151+} __no_const;
46152
46153 #if IS_ENABLED(CONFIG_DVB_DIB8000)
46154 void *dib8000_attach(struct dib8000_ops *ops);
46155diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
46156index 860c98fc..497fa25 100644
46157--- a/drivers/media/pci/cx88/cx88-video.c
46158+++ b/drivers/media/pci/cx88/cx88-video.c
46159@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
46160
46161 /* ------------------------------------------------------------------ */
46162
46163-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46164-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46165-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46166+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46167+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46168+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46169
46170 module_param_array(video_nr, int, NULL, 0444);
46171 module_param_array(vbi_nr, int, NULL, 0444);
46172diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
46173index 802642d..5534900 100644
46174--- a/drivers/media/pci/ivtv/ivtv-driver.c
46175+++ b/drivers/media/pci/ivtv/ivtv-driver.c
46176@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
46177 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
46178
46179 /* ivtv instance counter */
46180-static atomic_t ivtv_instance = ATOMIC_INIT(0);
46181+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
46182
46183 /* Parameter declarations */
46184 static int cardtype[IVTV_MAX_CARDS];
46185diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
46186index 570d119..ed25830 100644
46187--- a/drivers/media/pci/solo6x10/solo6x10-core.c
46188+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
46189@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
46190
46191 static int solo_sysfs_init(struct solo_dev *solo_dev)
46192 {
46193- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
46194+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
46195 struct device *dev = &solo_dev->dev;
46196 const char *driver;
46197 int i;
46198diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
46199index 7ddc767..1c24361 100644
46200--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
46201+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
46202@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
46203
46204 int solo_g723_init(struct solo_dev *solo_dev)
46205 {
46206- static struct snd_device_ops ops = { NULL };
46207+ static struct snd_device_ops ops = { };
46208 struct snd_card *card;
46209 struct snd_kcontrol_new kctl;
46210 char name[32];
46211diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46212index 8c84846..27b4f83 100644
46213--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
46214+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46215@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
46216
46217 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
46218 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
46219- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
46220+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
46221 if (p2m_id < 0)
46222 p2m_id = -p2m_id;
46223 }
46224diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
46225index 1ca54b0..7d7cb9a 100644
46226--- a/drivers/media/pci/solo6x10/solo6x10.h
46227+++ b/drivers/media/pci/solo6x10/solo6x10.h
46228@@ -218,7 +218,7 @@ struct solo_dev {
46229
46230 /* P2M DMA Engine */
46231 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
46232- atomic_t p2m_count;
46233+ atomic_unchecked_t p2m_count;
46234 int p2m_jiffies;
46235 unsigned int p2m_timeouts;
46236
46237diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
46238index c135165..dc69499 100644
46239--- a/drivers/media/pci/tw68/tw68-core.c
46240+++ b/drivers/media/pci/tw68/tw68-core.c
46241@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
46242 module_param_array(card, int, NULL, 0444);
46243 MODULE_PARM_DESC(card, "card type");
46244
46245-static atomic_t tw68_instance = ATOMIC_INIT(0);
46246+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
46247
46248 /* ------------------------------------------------------------------ */
46249
46250diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46251index ba2d8f9..1566684 100644
46252--- a/drivers/media/platform/omap/omap_vout.c
46253+++ b/drivers/media/platform/omap/omap_vout.c
46254@@ -63,7 +63,6 @@ enum omap_vout_channels {
46255 OMAP_VIDEO2,
46256 };
46257
46258-static struct videobuf_queue_ops video_vbq_ops;
46259 /* Variables configurable through module params*/
46260 static u32 video1_numbuffers = 3;
46261 static u32 video2_numbuffers = 3;
46262@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
46263 {
46264 struct videobuf_queue *q;
46265 struct omap_vout_device *vout = NULL;
46266+ static struct videobuf_queue_ops video_vbq_ops = {
46267+ .buf_setup = omap_vout_buffer_setup,
46268+ .buf_prepare = omap_vout_buffer_prepare,
46269+ .buf_release = omap_vout_buffer_release,
46270+ .buf_queue = omap_vout_buffer_queue,
46271+ };
46272
46273 vout = video_drvdata(file);
46274 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46275@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
46276 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46277
46278 q = &vout->vbq;
46279- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46280- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46281- video_vbq_ops.buf_release = omap_vout_buffer_release;
46282- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46283 spin_lock_init(&vout->vbq_lock);
46284
46285 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
46286diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46287index fb2acc5..a2fcbdc4 100644
46288--- a/drivers/media/platform/s5p-tv/mixer.h
46289+++ b/drivers/media/platform/s5p-tv/mixer.h
46290@@ -156,7 +156,7 @@ struct mxr_layer {
46291 /** layer index (unique identifier) */
46292 int idx;
46293 /** callbacks for layer methods */
46294- struct mxr_layer_ops ops;
46295+ struct mxr_layer_ops *ops;
46296 /** format array */
46297 const struct mxr_format **fmt_array;
46298 /** size of format array */
46299diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46300index 74344c7..a39e70e 100644
46301--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46302+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46303@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46304 {
46305 struct mxr_layer *layer;
46306 int ret;
46307- struct mxr_layer_ops ops = {
46308+ static struct mxr_layer_ops ops = {
46309 .release = mxr_graph_layer_release,
46310 .buffer_set = mxr_graph_buffer_set,
46311 .stream_set = mxr_graph_stream_set,
46312diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46313index b713403..53cb5ad 100644
46314--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46315+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46316@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46317 layer->update_buf = next;
46318 }
46319
46320- layer->ops.buffer_set(layer, layer->update_buf);
46321+ layer->ops->buffer_set(layer, layer->update_buf);
46322
46323 if (done && done != layer->shadow_buf)
46324 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46325diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46326index 72d4f2e..4b2ea0d 100644
46327--- a/drivers/media/platform/s5p-tv/mixer_video.c
46328+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46329@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46330 layer->geo.src.height = layer->geo.src.full_height;
46331
46332 mxr_geometry_dump(mdev, &layer->geo);
46333- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46334+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46335 mxr_geometry_dump(mdev, &layer->geo);
46336 }
46337
46338@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46339 layer->geo.dst.full_width = mbus_fmt.width;
46340 layer->geo.dst.full_height = mbus_fmt.height;
46341 layer->geo.dst.field = mbus_fmt.field;
46342- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46343+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46344
46345 mxr_geometry_dump(mdev, &layer->geo);
46346 }
46347@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46348 /* set source size to highest accepted value */
46349 geo->src.full_width = max(geo->dst.full_width, pix->width);
46350 geo->src.full_height = max(geo->dst.full_height, pix->height);
46351- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46352+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46353 mxr_geometry_dump(mdev, &layer->geo);
46354 /* set cropping to total visible screen */
46355 geo->src.width = pix->width;
46356@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46357 geo->src.x_offset = 0;
46358 geo->src.y_offset = 0;
46359 /* assure consistency of geometry */
46360- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46361+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46362 mxr_geometry_dump(mdev, &layer->geo);
46363 /* set full size to lowest possible value */
46364 geo->src.full_width = 0;
46365 geo->src.full_height = 0;
46366- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46367+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46368 mxr_geometry_dump(mdev, &layer->geo);
46369
46370 /* returning results */
46371@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46372 target->width = s->r.width;
46373 target->height = s->r.height;
46374
46375- layer->ops.fix_geometry(layer, stage, s->flags);
46376+ layer->ops->fix_geometry(layer, stage, s->flags);
46377
46378 /* retrieve update selection rectangle */
46379 res.left = target->x_offset;
46380@@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46381 mxr_output_get(mdev);
46382
46383 mxr_layer_update_output(layer);
46384- layer->ops.format_set(layer);
46385+ layer->ops->format_set(layer);
46386 /* enabling layer in hardware */
46387 spin_lock_irqsave(&layer->enq_slock, flags);
46388 layer->state = MXR_LAYER_STREAMING;
46389 spin_unlock_irqrestore(&layer->enq_slock, flags);
46390
46391- layer->ops.stream_set(layer, MXR_ENABLE);
46392+ layer->ops->stream_set(layer, MXR_ENABLE);
46393 mxr_streamer_get(mdev);
46394
46395 return 0;
46396@@ -1014,7 +1014,7 @@ static void stop_streaming(struct vb2_queue *vq)
46397 spin_unlock_irqrestore(&layer->enq_slock, flags);
46398
46399 /* disabling layer in hardware */
46400- layer->ops.stream_set(layer, MXR_DISABLE);
46401+ layer->ops->stream_set(layer, MXR_DISABLE);
46402 /* remove one streamer */
46403 mxr_streamer_put(mdev);
46404 /* allow changes in output configuration */
46405@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46406
46407 void mxr_layer_release(struct mxr_layer *layer)
46408 {
46409- if (layer->ops.release)
46410- layer->ops.release(layer);
46411+ if (layer->ops->release)
46412+ layer->ops->release(layer);
46413 }
46414
46415 void mxr_base_layer_release(struct mxr_layer *layer)
46416@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46417
46418 layer->mdev = mdev;
46419 layer->idx = idx;
46420- layer->ops = *ops;
46421+ layer->ops = ops;
46422
46423 spin_lock_init(&layer->enq_slock);
46424 INIT_LIST_HEAD(&layer->enq_list);
46425diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46426index c9388c4..ce71ece 100644
46427--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46428+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46429@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46430 {
46431 struct mxr_layer *layer;
46432 int ret;
46433- struct mxr_layer_ops ops = {
46434+ static struct mxr_layer_ops ops = {
46435 .release = mxr_vp_layer_release,
46436 .buffer_set = mxr_vp_buffer_set,
46437 .stream_set = mxr_vp_stream_set,
46438diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46439index 82affae..42833ec 100644
46440--- a/drivers/media/radio/radio-cadet.c
46441+++ b/drivers/media/radio/radio-cadet.c
46442@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46443 unsigned char readbuf[RDS_BUFFER];
46444 int i = 0;
46445
46446+ if (count > RDS_BUFFER)
46447+ return -EFAULT;
46448 mutex_lock(&dev->lock);
46449 if (dev->rdsstat == 0)
46450 cadet_start_rds(dev);
46451@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46452 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46453 mutex_unlock(&dev->lock);
46454
46455- if (i && copy_to_user(data, readbuf, i))
46456- return -EFAULT;
46457+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46458+ i = -EFAULT;
46459+
46460 return i;
46461 }
46462
46463diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46464index 5236035..c622c74 100644
46465--- a/drivers/media/radio/radio-maxiradio.c
46466+++ b/drivers/media/radio/radio-maxiradio.c
46467@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46468 /* TEA5757 pin mappings */
46469 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46470
46471-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46472+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46473
46474 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46475 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46476diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46477index 050b3bb..79f62b9 100644
46478--- a/drivers/media/radio/radio-shark.c
46479+++ b/drivers/media/radio/radio-shark.c
46480@@ -79,7 +79,7 @@ struct shark_device {
46481 u32 last_val;
46482 };
46483
46484-static atomic_t shark_instance = ATOMIC_INIT(0);
46485+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46486
46487 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46488 {
46489diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46490index 8654e0d..0608a64 100644
46491--- a/drivers/media/radio/radio-shark2.c
46492+++ b/drivers/media/radio/radio-shark2.c
46493@@ -74,7 +74,7 @@ struct shark_device {
46494 u8 *transfer_buffer;
46495 };
46496
46497-static atomic_t shark_instance = ATOMIC_INIT(0);
46498+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46499
46500 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46501 {
46502diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46503index dccf586..d5db411 100644
46504--- a/drivers/media/radio/radio-si476x.c
46505+++ b/drivers/media/radio/radio-si476x.c
46506@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46507 struct si476x_radio *radio;
46508 struct v4l2_ctrl *ctrl;
46509
46510- static atomic_t instance = ATOMIC_INIT(0);
46511+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46512
46513 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46514 if (!radio)
46515diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
46516index 704397f..4d05977 100644
46517--- a/drivers/media/radio/wl128x/fmdrv_common.c
46518+++ b/drivers/media/radio/wl128x/fmdrv_common.c
46519@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
46520 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
46521
46522 /* Radio Nr */
46523-static u32 radio_nr = -1;
46524+static int radio_nr = -1;
46525 module_param(radio_nr, int, 0444);
46526 MODULE_PARM_DESC(radio_nr, "Radio Nr");
46527
46528diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46529index 9fd1527..8927230 100644
46530--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46531+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46532@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46533
46534 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46535 {
46536- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46537- char result[64];
46538- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46539- sizeof(result), 0);
46540+ char *buf;
46541+ char *result;
46542+ int retval;
46543+
46544+ buf = kmalloc(2, GFP_KERNEL);
46545+ if (buf == NULL)
46546+ return -ENOMEM;
46547+ result = kmalloc(64, GFP_KERNEL);
46548+ if (result == NULL) {
46549+ kfree(buf);
46550+ return -ENOMEM;
46551+ }
46552+
46553+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46554+ buf[1] = enable ? 1 : 0;
46555+
46556+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46557+
46558+ kfree(buf);
46559+ kfree(result);
46560+ return retval;
46561 }
46562
46563 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46564 {
46565- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46566- char state[3];
46567- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46568+ char *buf;
46569+ char *state;
46570+ int retval;
46571+
46572+ buf = kmalloc(2, GFP_KERNEL);
46573+ if (buf == NULL)
46574+ return -ENOMEM;
46575+ state = kmalloc(3, GFP_KERNEL);
46576+ if (state == NULL) {
46577+ kfree(buf);
46578+ return -ENOMEM;
46579+ }
46580+
46581+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46582+ buf[1] = enable ? 1 : 0;
46583+
46584+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46585+
46586+ kfree(buf);
46587+ kfree(state);
46588+ return retval;
46589 }
46590
46591 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46592 {
46593- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46594- char state[3];
46595+ char *query;
46596+ char *state;
46597 int ret;
46598+ query = kmalloc(1, GFP_KERNEL);
46599+ if (query == NULL)
46600+ return -ENOMEM;
46601+ state = kmalloc(3, GFP_KERNEL);
46602+ if (state == NULL) {
46603+ kfree(query);
46604+ return -ENOMEM;
46605+ }
46606+
46607+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46608
46609 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46610
46611- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46612- sizeof(state), 0);
46613+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46614 if (ret < 0) {
46615 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46616 "state info\n");
46617@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46618
46619 /* Copy this pointer as we are gonna need it in the release phase */
46620 cinergyt2_usb_device = adap->dev;
46621-
46622+ kfree(query);
46623+ kfree(state);
46624 return 0;
46625 }
46626
46627@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46628 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46629 {
46630 struct cinergyt2_state *st = d->priv;
46631- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46632+ u8 *key, *cmd;
46633 int i;
46634
46635+ cmd = kmalloc(1, GFP_KERNEL);
46636+ if (cmd == NULL)
46637+ return -EINVAL;
46638+ key = kzalloc(5, GFP_KERNEL);
46639+ if (key == NULL) {
46640+ kfree(cmd);
46641+ return -EINVAL;
46642+ }
46643+
46644+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46645+
46646 *state = REMOTE_NO_KEY_PRESSED;
46647
46648- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46649+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46650 if (key[4] == 0xff) {
46651 /* key repeat */
46652 st->rc_counter++;
46653@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46654 *event = d->last_event;
46655 deb_rc("repeat key, event %x\n",
46656 *event);
46657- return 0;
46658+ goto out;
46659 }
46660 }
46661 deb_rc("repeated key (non repeatable)\n");
46662 }
46663- return 0;
46664+ goto out;
46665 }
46666
46667 /* hack to pass checksum on the custom field */
46668@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46669
46670 deb_rc("key: %*ph\n", 5, key);
46671 }
46672+out:
46673+ kfree(cmd);
46674+ kfree(key);
46675 return 0;
46676 }
46677
46678diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46679index c890fe4..f9b2ae6 100644
46680--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46681+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46682@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46683 fe_status_t *status)
46684 {
46685 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46686- struct dvbt_get_status_msg result;
46687- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46688+ struct dvbt_get_status_msg *result;
46689+ u8 *cmd;
46690 int ret;
46691
46692- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46693- sizeof(result), 0);
46694+ cmd = kmalloc(1, GFP_KERNEL);
46695+ if (cmd == NULL)
46696+ return -ENOMEM;
46697+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46698+ if (result == NULL) {
46699+ kfree(cmd);
46700+ return -ENOMEM;
46701+ }
46702+
46703+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46704+
46705+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46706+ sizeof(*result), 0);
46707 if (ret < 0)
46708- return ret;
46709+ goto out;
46710
46711 *status = 0;
46712
46713- if (0xffff - le16_to_cpu(result.gain) > 30)
46714+ if (0xffff - le16_to_cpu(result->gain) > 30)
46715 *status |= FE_HAS_SIGNAL;
46716- if (result.lock_bits & (1 << 6))
46717+ if (result->lock_bits & (1 << 6))
46718 *status |= FE_HAS_LOCK;
46719- if (result.lock_bits & (1 << 5))
46720+ if (result->lock_bits & (1 << 5))
46721 *status |= FE_HAS_SYNC;
46722- if (result.lock_bits & (1 << 4))
46723+ if (result->lock_bits & (1 << 4))
46724 *status |= FE_HAS_CARRIER;
46725- if (result.lock_bits & (1 << 1))
46726+ if (result->lock_bits & (1 << 1))
46727 *status |= FE_HAS_VITERBI;
46728
46729 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46730 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46731 *status &= ~FE_HAS_LOCK;
46732
46733- return 0;
46734+out:
46735+ kfree(cmd);
46736+ kfree(result);
46737+ return ret;
46738 }
46739
46740 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46741 {
46742 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46743- struct dvbt_get_status_msg status;
46744- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46745+ struct dvbt_get_status_msg *status;
46746+ char *cmd;
46747 int ret;
46748
46749- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46750- sizeof(status), 0);
46751+ cmd = kmalloc(1, GFP_KERNEL);
46752+ if (cmd == NULL)
46753+ return -ENOMEM;
46754+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46755+ if (status == NULL) {
46756+ kfree(cmd);
46757+ return -ENOMEM;
46758+ }
46759+
46760+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46761+
46762+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46763+ sizeof(*status), 0);
46764 if (ret < 0)
46765- return ret;
46766+ goto out;
46767
46768- *ber = le32_to_cpu(status.viterbi_error_rate);
46769+ *ber = le32_to_cpu(status->viterbi_error_rate);
46770+out:
46771+ kfree(cmd);
46772+ kfree(status);
46773 return 0;
46774 }
46775
46776 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46777 {
46778 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46779- struct dvbt_get_status_msg status;
46780- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46781+ struct dvbt_get_status_msg *status;
46782+ u8 *cmd;
46783 int ret;
46784
46785- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46786- sizeof(status), 0);
46787+ cmd = kmalloc(1, GFP_KERNEL);
46788+ if (cmd == NULL)
46789+ return -ENOMEM;
46790+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46791+ if (status == NULL) {
46792+ kfree(cmd);
46793+ return -ENOMEM;
46794+ }
46795+
46796+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46797+
46798+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46799+ sizeof(*status), 0);
46800 if (ret < 0) {
46801 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46802 ret);
46803- return ret;
46804+ goto out;
46805 }
46806- *unc = le32_to_cpu(status.uncorrected_block_count);
46807- return 0;
46808+ *unc = le32_to_cpu(status->uncorrected_block_count);
46809+
46810+out:
46811+ kfree(cmd);
46812+ kfree(status);
46813+ return ret;
46814 }
46815
46816 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46817 u16 *strength)
46818 {
46819 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46820- struct dvbt_get_status_msg status;
46821- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46822+ struct dvbt_get_status_msg *status;
46823+ char *cmd;
46824 int ret;
46825
46826- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46827- sizeof(status), 0);
46828+ cmd = kmalloc(1, GFP_KERNEL);
46829+ if (cmd == NULL)
46830+ return -ENOMEM;
46831+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46832+ if (status == NULL) {
46833+ kfree(cmd);
46834+ return -ENOMEM;
46835+ }
46836+
46837+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46838+
46839+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46840+ sizeof(*status), 0);
46841 if (ret < 0) {
46842 err("cinergyt2_fe_read_signal_strength() Failed!"
46843 " (Error=%d)\n", ret);
46844- return ret;
46845+ goto out;
46846 }
46847- *strength = (0xffff - le16_to_cpu(status.gain));
46848+ *strength = (0xffff - le16_to_cpu(status->gain));
46849+
46850+out:
46851+ kfree(cmd);
46852+ kfree(status);
46853 return 0;
46854 }
46855
46856 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46857 {
46858 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46859- struct dvbt_get_status_msg status;
46860- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46861+ struct dvbt_get_status_msg *status;
46862+ char *cmd;
46863 int ret;
46864
46865- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46866- sizeof(status), 0);
46867+ cmd = kmalloc(1, GFP_KERNEL);
46868+ if (cmd == NULL)
46869+ return -ENOMEM;
46870+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46871+ if (status == NULL) {
46872+ kfree(cmd);
46873+ return -ENOMEM;
46874+ }
46875+
46876+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46877+
46878+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46879+ sizeof(*status), 0);
46880 if (ret < 0) {
46881 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46882- return ret;
46883+ goto out;
46884 }
46885- *snr = (status.snr << 8) | status.snr;
46886- return 0;
46887+ *snr = (status->snr << 8) | status->snr;
46888+
46889+out:
46890+ kfree(cmd);
46891+ kfree(status);
46892+ return ret;
46893 }
46894
46895 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46896@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46897 {
46898 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46899 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46900- struct dvbt_set_parameters_msg param;
46901- char result[2];
46902+ struct dvbt_set_parameters_msg *param;
46903+ char *result;
46904 int err;
46905
46906- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46907- param.tps = cpu_to_le16(compute_tps(fep));
46908- param.freq = cpu_to_le32(fep->frequency / 1000);
46909- param.flags = 0;
46910+ result = kmalloc(2, GFP_KERNEL);
46911+ if (result == NULL)
46912+ return -ENOMEM;
46913+ param = kmalloc(sizeof(*param), GFP_KERNEL);
46914+ if (param == NULL) {
46915+ kfree(result);
46916+ return -ENOMEM;
46917+ }
46918+
46919+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46920+ param->tps = cpu_to_le16(compute_tps(fep));
46921+ param->freq = cpu_to_le32(fep->frequency / 1000);
46922+ param->flags = 0;
46923
46924 switch (fep->bandwidth_hz) {
46925 default:
46926 case 8000000:
46927- param.bandwidth = 8;
46928+ param->bandwidth = 8;
46929 break;
46930 case 7000000:
46931- param.bandwidth = 7;
46932+ param->bandwidth = 7;
46933 break;
46934 case 6000000:
46935- param.bandwidth = 6;
46936+ param->bandwidth = 6;
46937 break;
46938 }
46939
46940 err = dvb_usb_generic_rw(state->d,
46941- (char *)&param, sizeof(param),
46942- result, sizeof(result), 0);
46943+ (char *)param, sizeof(*param),
46944+ result, 2, 0);
46945 if (err < 0)
46946 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46947
46948- return (err < 0) ? err : 0;
46949+ kfree(result);
46950+ kfree(param);
46951+ return err;
46952 }
46953
46954 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46955diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46956index 733a7ff..f8b52e3 100644
46957--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46958+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46959@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46960
46961 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46962 {
46963- struct hexline hx;
46964- u8 reset;
46965+ struct hexline *hx;
46966+ u8 *reset;
46967 int ret,pos=0;
46968
46969+ reset = kmalloc(1, GFP_KERNEL);
46970+ if (reset == NULL)
46971+ return -ENOMEM;
46972+
46973+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46974+ if (hx == NULL) {
46975+ kfree(reset);
46976+ return -ENOMEM;
46977+ }
46978+
46979 /* stop the CPU */
46980- reset = 1;
46981- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46982+ reset[0] = 1;
46983+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46984 err("could not stop the USB controller CPU.");
46985
46986- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46987- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46988- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46989+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46990+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46991+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46992
46993- if (ret != hx.len) {
46994+ if (ret != hx->len) {
46995 err("error while transferring firmware "
46996 "(transferred size: %d, block size: %d)",
46997- ret,hx.len);
46998+ ret,hx->len);
46999 ret = -EINVAL;
47000 break;
47001 }
47002 }
47003 if (ret < 0) {
47004 err("firmware download failed at %d with %d",pos,ret);
47005+ kfree(reset);
47006+ kfree(hx);
47007 return ret;
47008 }
47009
47010 if (ret == 0) {
47011 /* restart the CPU */
47012- reset = 0;
47013- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
47014+ reset[0] = 0;
47015+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
47016 err("could not restart the USB controller CPU.");
47017 ret = -EINVAL;
47018 }
47019 } else
47020 ret = -EIO;
47021
47022+ kfree(reset);
47023+ kfree(hx);
47024+
47025 return ret;
47026 }
47027 EXPORT_SYMBOL(usb_cypress_load_firmware);
47028diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
47029index 1a3df10..57997a5 100644
47030--- a/drivers/media/usb/dvb-usb/dw2102.c
47031+++ b/drivers/media/usb/dvb-usb/dw2102.c
47032@@ -118,7 +118,7 @@ struct su3000_state {
47033
47034 struct s6x0_state {
47035 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
47036-};
47037+} __no_const;
47038
47039 /* debug */
47040 static int dvb_usb_dw2102_debug;
47041diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
47042index 5801ae7..83f71fa 100644
47043--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
47044+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
47045@@ -87,8 +87,11 @@ struct technisat_usb2_state {
47046 static int technisat_usb2_i2c_access(struct usb_device *udev,
47047 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
47048 {
47049- u8 b[64];
47050- int ret, actual_length;
47051+ u8 *b = kmalloc(64, GFP_KERNEL);
47052+ int ret, actual_length, error = 0;
47053+
47054+ if (b == NULL)
47055+ return -ENOMEM;
47056
47057 deb_i2c("i2c-access: %02x, tx: ", device_addr);
47058 debug_dump(tx, txlen, deb_i2c);
47059@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47060
47061 if (ret < 0) {
47062 err("i2c-error: out failed %02x = %d", device_addr, ret);
47063- return -ENODEV;
47064+ error = -ENODEV;
47065+ goto out;
47066 }
47067
47068 ret = usb_bulk_msg(udev,
47069@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47070 b, 64, &actual_length, 1000);
47071 if (ret < 0) {
47072 err("i2c-error: in failed %02x = %d", device_addr, ret);
47073- return -ENODEV;
47074+ error = -ENODEV;
47075+ goto out;
47076 }
47077
47078 if (b[0] != I2C_STATUS_OK) {
47079@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47080 /* handle tuner-i2c-nak */
47081 if (!(b[0] == I2C_STATUS_NAK &&
47082 device_addr == 0x60
47083- /* && device_is_technisat_usb2 */))
47084- return -ENODEV;
47085+ /* && device_is_technisat_usb2 */)) {
47086+ error = -ENODEV;
47087+ goto out;
47088+ }
47089 }
47090
47091 deb_i2c("status: %d, ", b[0]);
47092@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47093
47094 deb_i2c("\n");
47095
47096- return 0;
47097+out:
47098+ kfree(b);
47099+ return error;
47100 }
47101
47102 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
47103@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47104 {
47105 int ret;
47106
47107- u8 led[8] = {
47108- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47109- 0
47110- };
47111+ u8 *led = kzalloc(8, GFP_KERNEL);
47112+
47113+ if (led == NULL)
47114+ return -ENOMEM;
47115
47116 if (disable_led_control && state != TECH_LED_OFF)
47117 return 0;
47118
47119+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
47120+
47121 switch (state) {
47122 case TECH_LED_ON:
47123 led[1] = 0x82;
47124@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47125 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47126 USB_TYPE_VENDOR | USB_DIR_OUT,
47127 0, 0,
47128- led, sizeof(led), 500);
47129+ led, 8, 500);
47130
47131 mutex_unlock(&d->i2c_mutex);
47132+
47133+ kfree(led);
47134+
47135 return ret;
47136 }
47137
47138 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
47139 {
47140 int ret;
47141- u8 b = 0;
47142+ u8 *b = kzalloc(1, GFP_KERNEL);
47143+
47144+ if (b == NULL)
47145+ return -ENOMEM;
47146
47147 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
47148 return -EAGAIN;
47149@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
47150 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
47151 USB_TYPE_VENDOR | USB_DIR_OUT,
47152 (red << 8) | green, 0,
47153- &b, 1, 500);
47154+ b, 1, 500);
47155
47156 mutex_unlock(&d->i2c_mutex);
47157
47158+ kfree(b);
47159+
47160 return ret;
47161 }
47162
47163@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47164 struct dvb_usb_device_description **desc, int *cold)
47165 {
47166 int ret;
47167- u8 version[3];
47168+ u8 *version = kmalloc(3, GFP_KERNEL);
47169
47170 /* first select the interface */
47171 if (usb_set_interface(udev, 0, 1) != 0)
47172@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47173
47174 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
47175
47176+ if (version == NULL)
47177+ return 0;
47178+
47179 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
47180 GET_VERSION_INFO_VENDOR_REQUEST,
47181 USB_TYPE_VENDOR | USB_DIR_IN,
47182 0, 0,
47183- version, sizeof(version), 500);
47184+ version, 3, 500);
47185
47186 if (ret < 0)
47187 *cold = 1;
47188@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47189 *cold = 0;
47190 }
47191
47192+ kfree(version);
47193+
47194 return 0;
47195 }
47196
47197@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
47198
47199 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47200 {
47201- u8 buf[62], *b;
47202+ u8 *buf, *b;
47203 int ret;
47204 struct ir_raw_event ev;
47205
47206+ buf = kmalloc(62, GFP_KERNEL);
47207+
47208+ if (buf == NULL)
47209+ return -ENOMEM;
47210+
47211 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
47212 buf[1] = 0x08;
47213 buf[2] = 0x8f;
47214@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47215 GET_IR_DATA_VENDOR_REQUEST,
47216 USB_TYPE_VENDOR | USB_DIR_IN,
47217 0x8080, 0,
47218- buf, sizeof(buf), 500);
47219+ buf, 62, 500);
47220
47221 unlock:
47222 mutex_unlock(&d->i2c_mutex);
47223
47224- if (ret < 0)
47225+ if (ret < 0) {
47226+ kfree(buf);
47227 return ret;
47228+ }
47229
47230- if (ret == 1)
47231+ if (ret == 1) {
47232+ kfree(buf);
47233 return 0; /* no key pressed */
47234+ }
47235
47236 /* decoding */
47237 b = buf+1;
47238@@ -656,6 +689,8 @@ unlock:
47239
47240 ir_raw_event_handle(d->rc_dev);
47241
47242+ kfree(buf);
47243+
47244 return 1;
47245 }
47246
47247diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47248index af63543..0436f20 100644
47249--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47250+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47251@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47252 * by passing a very big num_planes value */
47253 uplane = compat_alloc_user_space(num_planes *
47254 sizeof(struct v4l2_plane));
47255- kp->m.planes = (__force struct v4l2_plane *)uplane;
47256+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
47257
47258 while (--num_planes >= 0) {
47259 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47260@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47261 if (num_planes == 0)
47262 return 0;
47263
47264- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
47265+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47266 if (get_user(p, &up->m.planes))
47267 return -EFAULT;
47268 uplane32 = compat_ptr(p);
47269@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47270 get_user(kp->flags, &up->flags) ||
47271 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
47272 return -EFAULT;
47273- kp->base = (__force void *)compat_ptr(tmp);
47274+ kp->base = (__force_kernel void *)compat_ptr(tmp);
47275 return 0;
47276 }
47277
47278@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47279 n * sizeof(struct v4l2_ext_control32)))
47280 return -EFAULT;
47281 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47282- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
47283+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
47284 while (--n >= 0) {
47285 u32 id;
47286
47287@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47288 {
47289 struct v4l2_ext_control32 __user *ucontrols;
47290 struct v4l2_ext_control __user *kcontrols =
47291- (__force struct v4l2_ext_control __user *)kp->controls;
47292+ (struct v4l2_ext_control __force_user *)kp->controls;
47293 int n = kp->count;
47294 compat_caddr_t p;
47295
47296@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47297 get_user(tmp, &up->edid) ||
47298 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47299 return -EFAULT;
47300- kp->edid = (__force u8 *)compat_ptr(tmp);
47301+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
47302 return 0;
47303 }
47304
47305diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47306index 015f92a..59e311e 100644
47307--- a/drivers/media/v4l2-core/v4l2-device.c
47308+++ b/drivers/media/v4l2-core/v4l2-device.c
47309@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47310 EXPORT_SYMBOL_GPL(v4l2_device_put);
47311
47312 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47313- atomic_t *instance)
47314+ atomic_unchecked_t *instance)
47315 {
47316- int num = atomic_inc_return(instance) - 1;
47317+ int num = atomic_inc_return_unchecked(instance) - 1;
47318 int len = strlen(basename);
47319
47320 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47321diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47322index b084072..36706d7 100644
47323--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47324+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47325@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
47326 struct file *file, void *fh, void *p);
47327 } u;
47328 void (*debug)(const void *arg, bool write_only);
47329-};
47330+} __do_const;
47331+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47332
47333 /* This control needs a priority check */
47334 #define INFO_FL_PRIO (1 << 0)
47335@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
47336 struct video_device *vfd = video_devdata(file);
47337 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47338 bool write_only = false;
47339- struct v4l2_ioctl_info default_info;
47340+ v4l2_ioctl_info_no_const default_info;
47341 const struct v4l2_ioctl_info *info;
47342 void *fh = file->private_data;
47343 struct v4l2_fh *vfh = NULL;
47344@@ -2426,7 +2427,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47345 ret = -EINVAL;
47346 break;
47347 }
47348- *user_ptr = (void __user *)buf->m.planes;
47349+ *user_ptr = (void __force_user *)buf->m.planes;
47350 *kernel_ptr = (void **)&buf->m.planes;
47351 *array_size = sizeof(struct v4l2_plane) * buf->length;
47352 ret = 1;
47353@@ -2443,7 +2444,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47354 ret = -EINVAL;
47355 break;
47356 }
47357- *user_ptr = (void __user *)edid->edid;
47358+ *user_ptr = (void __force_user *)edid->edid;
47359 *kernel_ptr = (void **)&edid->edid;
47360 *array_size = edid->blocks * 128;
47361 ret = 1;
47362@@ -2461,7 +2462,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47363 ret = -EINVAL;
47364 break;
47365 }
47366- *user_ptr = (void __user *)ctrls->controls;
47367+ *user_ptr = (void __force_user *)ctrls->controls;
47368 *kernel_ptr = (void **)&ctrls->controls;
47369 *array_size = sizeof(struct v4l2_ext_control)
47370 * ctrls->count;
47371@@ -2562,7 +2563,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47372 }
47373
47374 if (has_array_args) {
47375- *kernel_ptr = (void __force *)user_ptr;
47376+ *kernel_ptr = (void __force_kernel *)user_ptr;
47377 if (copy_to_user(user_ptr, mbuf, array_size))
47378 err = -EFAULT;
47379 goto out_array_args;
47380diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
47381index 24696f5..3637780 100644
47382--- a/drivers/memory/omap-gpmc.c
47383+++ b/drivers/memory/omap-gpmc.c
47384@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
47385 };
47386
47387 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
47388-static struct irq_chip gpmc_irq_chip;
47389 static int gpmc_irq_start;
47390
47391 static struct resource gpmc_mem_root;
47392@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
47393
47394 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
47395
47396+static struct irq_chip gpmc_irq_chip = {
47397+ .name = "gpmc",
47398+ .irq_startup = gpmc_irq_noop_ret,
47399+ .irq_enable = gpmc_irq_enable,
47400+ .irq_disable = gpmc_irq_disable,
47401+ .irq_shutdown = gpmc_irq_noop,
47402+ .irq_ack = gpmc_irq_noop,
47403+ .irq_mask = gpmc_irq_noop,
47404+ .irq_unmask = gpmc_irq_noop,
47405+};
47406+
47407 static int gpmc_setup_irq(void)
47408 {
47409 int i;
47410@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
47411 return gpmc_irq_start;
47412 }
47413
47414- gpmc_irq_chip.name = "gpmc";
47415- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
47416- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
47417- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
47418- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
47419- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
47420- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
47421- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
47422-
47423 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
47424 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
47425
47426diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47427index 187f836..679544b 100644
47428--- a/drivers/message/fusion/mptbase.c
47429+++ b/drivers/message/fusion/mptbase.c
47430@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47431 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47432 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47433
47434+#ifdef CONFIG_GRKERNSEC_HIDESYM
47435+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47436+#else
47437 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47438 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47439+#endif
47440+
47441 /*
47442 * Rounding UP to nearest 4-kB boundary here...
47443 */
47444@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47445 ioc->facts.GlobalCredits);
47446
47447 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47448+#ifdef CONFIG_GRKERNSEC_HIDESYM
47449+ NULL, NULL);
47450+#else
47451 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47452+#endif
47453 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47454 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47455 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
47456diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47457index 5bdaae1..eced16f 100644
47458--- a/drivers/message/fusion/mptsas.c
47459+++ b/drivers/message/fusion/mptsas.c
47460@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47461 return 0;
47462 }
47463
47464+static inline void
47465+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47466+{
47467+ if (phy_info->port_details) {
47468+ phy_info->port_details->rphy = rphy;
47469+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47470+ ioc->name, rphy));
47471+ }
47472+
47473+ if (rphy) {
47474+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47475+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47476+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47477+ ioc->name, rphy, rphy->dev.release));
47478+ }
47479+}
47480+
47481 /* no mutex */
47482 static void
47483 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47484@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47485 return NULL;
47486 }
47487
47488-static inline void
47489-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47490-{
47491- if (phy_info->port_details) {
47492- phy_info->port_details->rphy = rphy;
47493- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47494- ioc->name, rphy));
47495- }
47496-
47497- if (rphy) {
47498- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47499- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47500- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47501- ioc->name, rphy, rphy->dev.release));
47502- }
47503-}
47504-
47505 static inline struct sas_port *
47506 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47507 {
47508diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47509index 9a8e185..27ff17d 100644
47510--- a/drivers/mfd/ab8500-debugfs.c
47511+++ b/drivers/mfd/ab8500-debugfs.c
47512@@ -100,7 +100,7 @@ static int irq_last;
47513 static u32 *irq_count;
47514 static int num_irqs;
47515
47516-static struct device_attribute **dev_attr;
47517+static device_attribute_no_const **dev_attr;
47518 static char **event_name;
47519
47520 static u8 avg_sample = SAMPLE_16;
47521diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
47522index 5615522..1eb6f3dc 100644
47523--- a/drivers/mfd/kempld-core.c
47524+++ b/drivers/mfd/kempld-core.c
47525@@ -499,7 +499,7 @@ static struct platform_driver kempld_driver = {
47526 .remove = kempld_remove,
47527 };
47528
47529-static struct dmi_system_id kempld_dmi_table[] __initdata = {
47530+static const struct dmi_system_id kempld_dmi_table[] __initconst = {
47531 {
47532 .ident = "BHL6",
47533 .matches = {
47534diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47535index c880c89..45a7c68 100644
47536--- a/drivers/mfd/max8925-i2c.c
47537+++ b/drivers/mfd/max8925-i2c.c
47538@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47539 const struct i2c_device_id *id)
47540 {
47541 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47542- static struct max8925_chip *chip;
47543+ struct max8925_chip *chip;
47544 struct device_node *node = client->dev.of_node;
47545
47546 if (node && !pdata) {
47547diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47548index 7612d89..70549c2 100644
47549--- a/drivers/mfd/tps65910.c
47550+++ b/drivers/mfd/tps65910.c
47551@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47552 struct tps65910_platform_data *pdata)
47553 {
47554 int ret = 0;
47555- static struct regmap_irq_chip *tps6591x_irqs_chip;
47556+ struct regmap_irq_chip *tps6591x_irqs_chip;
47557
47558 if (!irq) {
47559 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
47560diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47561index 1b772ef..01e77d33 100644
47562--- a/drivers/mfd/twl4030-irq.c
47563+++ b/drivers/mfd/twl4030-irq.c
47564@@ -34,6 +34,7 @@
47565 #include <linux/of.h>
47566 #include <linux/irqdomain.h>
47567 #include <linux/i2c/twl.h>
47568+#include <asm/pgtable.h>
47569
47570 #include "twl-core.h"
47571
47572@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47573 * Install an irq handler for each of the SIH modules;
47574 * clone dummy irq_chip since PIH can't *do* anything
47575 */
47576- twl4030_irq_chip = dummy_irq_chip;
47577- twl4030_irq_chip.name = "twl4030";
47578+ pax_open_kernel();
47579+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47580+ *(const char **)&twl4030_irq_chip.name = "twl4030";
47581
47582- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47583+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47584+ pax_close_kernel();
47585
47586 for (i = irq_base; i < irq_end; i++) {
47587 irq_set_chip_and_handler(i, &twl4030_irq_chip,
47588diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
47589index 464419b..64bae8d 100644
47590--- a/drivers/misc/c2port/core.c
47591+++ b/drivers/misc/c2port/core.c
47592@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
47593 goto error_idr_alloc;
47594 c2dev->id = ret;
47595
47596- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47597+ pax_open_kernel();
47598+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47599+ pax_close_kernel();
47600
47601 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
47602 "c2port%d", c2dev->id);
47603diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
47604index 8385177..2f54635 100644
47605--- a/drivers/misc/eeprom/sunxi_sid.c
47606+++ b/drivers/misc/eeprom/sunxi_sid.c
47607@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
47608
47609 platform_set_drvdata(pdev, sid_data);
47610
47611- sid_bin_attr.size = sid_data->keysize;
47612+ pax_open_kernel();
47613+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
47614+ pax_close_kernel();
47615 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
47616 return -ENODEV;
47617
47618diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47619index 36f5d52..32311c3 100644
47620--- a/drivers/misc/kgdbts.c
47621+++ b/drivers/misc/kgdbts.c
47622@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47623 char before[BREAK_INSTR_SIZE];
47624 char after[BREAK_INSTR_SIZE];
47625
47626- probe_kernel_read(before, (char *)kgdbts_break_test,
47627+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47628 BREAK_INSTR_SIZE);
47629 init_simple_test();
47630 ts.tst = plant_and_detach_test;
47631@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47632 /* Activate test with initial breakpoint */
47633 if (!is_early)
47634 kgdb_breakpoint();
47635- probe_kernel_read(after, (char *)kgdbts_break_test,
47636+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47637 BREAK_INSTR_SIZE);
47638 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47639 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
47640diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47641index 3ef4627..8d00486 100644
47642--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47643+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47644@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47645 * the lid is closed. This leads to interrupts as soon as a little move
47646 * is done.
47647 */
47648- atomic_inc(&lis3->count);
47649+ atomic_inc_unchecked(&lis3->count);
47650
47651 wake_up_interruptible(&lis3->misc_wait);
47652 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47653@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47654 if (lis3->pm_dev)
47655 pm_runtime_get_sync(lis3->pm_dev);
47656
47657- atomic_set(&lis3->count, 0);
47658+ atomic_set_unchecked(&lis3->count, 0);
47659 return 0;
47660 }
47661
47662@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47663 add_wait_queue(&lis3->misc_wait, &wait);
47664 while (true) {
47665 set_current_state(TASK_INTERRUPTIBLE);
47666- data = atomic_xchg(&lis3->count, 0);
47667+ data = atomic_xchg_unchecked(&lis3->count, 0);
47668 if (data)
47669 break;
47670
47671@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47672 struct lis3lv02d, miscdev);
47673
47674 poll_wait(file, &lis3->misc_wait, wait);
47675- if (atomic_read(&lis3->count))
47676+ if (atomic_read_unchecked(&lis3->count))
47677 return POLLIN | POLLRDNORM;
47678 return 0;
47679 }
47680diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47681index c439c82..1f20f57 100644
47682--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47683+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47684@@ -297,7 +297,7 @@ struct lis3lv02d {
47685 struct input_polled_dev *idev; /* input device */
47686 struct platform_device *pdev; /* platform device */
47687 struct regulator_bulk_data regulators[2];
47688- atomic_t count; /* interrupt count after last read */
47689+ atomic_unchecked_t count; /* interrupt count after last read */
47690 union axis_conversion ac; /* hw -> logical axis */
47691 int mapped_btns[3];
47692
47693diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47694index 2f30bad..c4c13d0 100644
47695--- a/drivers/misc/sgi-gru/gruhandles.c
47696+++ b/drivers/misc/sgi-gru/gruhandles.c
47697@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47698 unsigned long nsec;
47699
47700 nsec = CLKS2NSEC(clks);
47701- atomic_long_inc(&mcs_op_statistics[op].count);
47702- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47703+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47704+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47705 if (mcs_op_statistics[op].max < nsec)
47706 mcs_op_statistics[op].max = nsec;
47707 }
47708diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47709index 4f76359..cdfcb2e 100644
47710--- a/drivers/misc/sgi-gru/gruprocfs.c
47711+++ b/drivers/misc/sgi-gru/gruprocfs.c
47712@@ -32,9 +32,9 @@
47713
47714 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47715
47716-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47717+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47718 {
47719- unsigned long val = atomic_long_read(v);
47720+ unsigned long val = atomic_long_read_unchecked(v);
47721
47722 seq_printf(s, "%16lu %s\n", val, id);
47723 }
47724@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47725
47726 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47727 for (op = 0; op < mcsop_last; op++) {
47728- count = atomic_long_read(&mcs_op_statistics[op].count);
47729- total = atomic_long_read(&mcs_op_statistics[op].total);
47730+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47731+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47732 max = mcs_op_statistics[op].max;
47733 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47734 count ? total / count : 0, max);
47735diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47736index 5c3ce24..4915ccb 100644
47737--- a/drivers/misc/sgi-gru/grutables.h
47738+++ b/drivers/misc/sgi-gru/grutables.h
47739@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47740 * GRU statistics.
47741 */
47742 struct gru_stats_s {
47743- atomic_long_t vdata_alloc;
47744- atomic_long_t vdata_free;
47745- atomic_long_t gts_alloc;
47746- atomic_long_t gts_free;
47747- atomic_long_t gms_alloc;
47748- atomic_long_t gms_free;
47749- atomic_long_t gts_double_allocate;
47750- atomic_long_t assign_context;
47751- atomic_long_t assign_context_failed;
47752- atomic_long_t free_context;
47753- atomic_long_t load_user_context;
47754- atomic_long_t load_kernel_context;
47755- atomic_long_t lock_kernel_context;
47756- atomic_long_t unlock_kernel_context;
47757- atomic_long_t steal_user_context;
47758- atomic_long_t steal_kernel_context;
47759- atomic_long_t steal_context_failed;
47760- atomic_long_t nopfn;
47761- atomic_long_t asid_new;
47762- atomic_long_t asid_next;
47763- atomic_long_t asid_wrap;
47764- atomic_long_t asid_reuse;
47765- atomic_long_t intr;
47766- atomic_long_t intr_cbr;
47767- atomic_long_t intr_tfh;
47768- atomic_long_t intr_spurious;
47769- atomic_long_t intr_mm_lock_failed;
47770- atomic_long_t call_os;
47771- atomic_long_t call_os_wait_queue;
47772- atomic_long_t user_flush_tlb;
47773- atomic_long_t user_unload_context;
47774- atomic_long_t user_exception;
47775- atomic_long_t set_context_option;
47776- atomic_long_t check_context_retarget_intr;
47777- atomic_long_t check_context_unload;
47778- atomic_long_t tlb_dropin;
47779- atomic_long_t tlb_preload_page;
47780- atomic_long_t tlb_dropin_fail_no_asid;
47781- atomic_long_t tlb_dropin_fail_upm;
47782- atomic_long_t tlb_dropin_fail_invalid;
47783- atomic_long_t tlb_dropin_fail_range_active;
47784- atomic_long_t tlb_dropin_fail_idle;
47785- atomic_long_t tlb_dropin_fail_fmm;
47786- atomic_long_t tlb_dropin_fail_no_exception;
47787- atomic_long_t tfh_stale_on_fault;
47788- atomic_long_t mmu_invalidate_range;
47789- atomic_long_t mmu_invalidate_page;
47790- atomic_long_t flush_tlb;
47791- atomic_long_t flush_tlb_gru;
47792- atomic_long_t flush_tlb_gru_tgh;
47793- atomic_long_t flush_tlb_gru_zero_asid;
47794+ atomic_long_unchecked_t vdata_alloc;
47795+ atomic_long_unchecked_t vdata_free;
47796+ atomic_long_unchecked_t gts_alloc;
47797+ atomic_long_unchecked_t gts_free;
47798+ atomic_long_unchecked_t gms_alloc;
47799+ atomic_long_unchecked_t gms_free;
47800+ atomic_long_unchecked_t gts_double_allocate;
47801+ atomic_long_unchecked_t assign_context;
47802+ atomic_long_unchecked_t assign_context_failed;
47803+ atomic_long_unchecked_t free_context;
47804+ atomic_long_unchecked_t load_user_context;
47805+ atomic_long_unchecked_t load_kernel_context;
47806+ atomic_long_unchecked_t lock_kernel_context;
47807+ atomic_long_unchecked_t unlock_kernel_context;
47808+ atomic_long_unchecked_t steal_user_context;
47809+ atomic_long_unchecked_t steal_kernel_context;
47810+ atomic_long_unchecked_t steal_context_failed;
47811+ atomic_long_unchecked_t nopfn;
47812+ atomic_long_unchecked_t asid_new;
47813+ atomic_long_unchecked_t asid_next;
47814+ atomic_long_unchecked_t asid_wrap;
47815+ atomic_long_unchecked_t asid_reuse;
47816+ atomic_long_unchecked_t intr;
47817+ atomic_long_unchecked_t intr_cbr;
47818+ atomic_long_unchecked_t intr_tfh;
47819+ atomic_long_unchecked_t intr_spurious;
47820+ atomic_long_unchecked_t intr_mm_lock_failed;
47821+ atomic_long_unchecked_t call_os;
47822+ atomic_long_unchecked_t call_os_wait_queue;
47823+ atomic_long_unchecked_t user_flush_tlb;
47824+ atomic_long_unchecked_t user_unload_context;
47825+ atomic_long_unchecked_t user_exception;
47826+ atomic_long_unchecked_t set_context_option;
47827+ atomic_long_unchecked_t check_context_retarget_intr;
47828+ atomic_long_unchecked_t check_context_unload;
47829+ atomic_long_unchecked_t tlb_dropin;
47830+ atomic_long_unchecked_t tlb_preload_page;
47831+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47832+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47833+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47834+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47835+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47836+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47837+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47838+ atomic_long_unchecked_t tfh_stale_on_fault;
47839+ atomic_long_unchecked_t mmu_invalidate_range;
47840+ atomic_long_unchecked_t mmu_invalidate_page;
47841+ atomic_long_unchecked_t flush_tlb;
47842+ atomic_long_unchecked_t flush_tlb_gru;
47843+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47844+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47845
47846- atomic_long_t copy_gpa;
47847- atomic_long_t read_gpa;
47848+ atomic_long_unchecked_t copy_gpa;
47849+ atomic_long_unchecked_t read_gpa;
47850
47851- atomic_long_t mesq_receive;
47852- atomic_long_t mesq_receive_none;
47853- atomic_long_t mesq_send;
47854- atomic_long_t mesq_send_failed;
47855- atomic_long_t mesq_noop;
47856- atomic_long_t mesq_send_unexpected_error;
47857- atomic_long_t mesq_send_lb_overflow;
47858- atomic_long_t mesq_send_qlimit_reached;
47859- atomic_long_t mesq_send_amo_nacked;
47860- atomic_long_t mesq_send_put_nacked;
47861- atomic_long_t mesq_page_overflow;
47862- atomic_long_t mesq_qf_locked;
47863- atomic_long_t mesq_qf_noop_not_full;
47864- atomic_long_t mesq_qf_switch_head_failed;
47865- atomic_long_t mesq_qf_unexpected_error;
47866- atomic_long_t mesq_noop_unexpected_error;
47867- atomic_long_t mesq_noop_lb_overflow;
47868- atomic_long_t mesq_noop_qlimit_reached;
47869- atomic_long_t mesq_noop_amo_nacked;
47870- atomic_long_t mesq_noop_put_nacked;
47871- atomic_long_t mesq_noop_page_overflow;
47872+ atomic_long_unchecked_t mesq_receive;
47873+ atomic_long_unchecked_t mesq_receive_none;
47874+ atomic_long_unchecked_t mesq_send;
47875+ atomic_long_unchecked_t mesq_send_failed;
47876+ atomic_long_unchecked_t mesq_noop;
47877+ atomic_long_unchecked_t mesq_send_unexpected_error;
47878+ atomic_long_unchecked_t mesq_send_lb_overflow;
47879+ atomic_long_unchecked_t mesq_send_qlimit_reached;
47880+ atomic_long_unchecked_t mesq_send_amo_nacked;
47881+ atomic_long_unchecked_t mesq_send_put_nacked;
47882+ atomic_long_unchecked_t mesq_page_overflow;
47883+ atomic_long_unchecked_t mesq_qf_locked;
47884+ atomic_long_unchecked_t mesq_qf_noop_not_full;
47885+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
47886+ atomic_long_unchecked_t mesq_qf_unexpected_error;
47887+ atomic_long_unchecked_t mesq_noop_unexpected_error;
47888+ atomic_long_unchecked_t mesq_noop_lb_overflow;
47889+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
47890+ atomic_long_unchecked_t mesq_noop_amo_nacked;
47891+ atomic_long_unchecked_t mesq_noop_put_nacked;
47892+ atomic_long_unchecked_t mesq_noop_page_overflow;
47893
47894 };
47895
47896@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
47897 tghop_invalidate, mcsop_last};
47898
47899 struct mcs_op_statistic {
47900- atomic_long_t count;
47901- atomic_long_t total;
47902+ atomic_long_unchecked_t count;
47903+ atomic_long_unchecked_t total;
47904 unsigned long max;
47905 };
47906
47907@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
47908
47909 #define STAT(id) do { \
47910 if (gru_options & OPT_STATS) \
47911- atomic_long_inc(&gru_stats.id); \
47912+ atomic_long_inc_unchecked(&gru_stats.id); \
47913 } while (0)
47914
47915 #ifdef CONFIG_SGI_GRU_DEBUG
47916diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
47917index c862cd4..0d176fe 100644
47918--- a/drivers/misc/sgi-xp/xp.h
47919+++ b/drivers/misc/sgi-xp/xp.h
47920@@ -288,7 +288,7 @@ struct xpc_interface {
47921 xpc_notify_func, void *);
47922 void (*received) (short, int, void *);
47923 enum xp_retval (*partid_to_nasids) (short, void *);
47924-};
47925+} __no_const;
47926
47927 extern struct xpc_interface xpc_interface;
47928
47929diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
47930index 01be66d..e3a0c7e 100644
47931--- a/drivers/misc/sgi-xp/xp_main.c
47932+++ b/drivers/misc/sgi-xp/xp_main.c
47933@@ -78,13 +78,13 @@ xpc_notloaded(void)
47934 }
47935
47936 struct xpc_interface xpc_interface = {
47937- (void (*)(int))xpc_notloaded,
47938- (void (*)(int))xpc_notloaded,
47939- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47940- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47941+ .connect = (void (*)(int))xpc_notloaded,
47942+ .disconnect = (void (*)(int))xpc_notloaded,
47943+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47944+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47945 void *))xpc_notloaded,
47946- (void (*)(short, int, void *))xpc_notloaded,
47947- (enum xp_retval(*)(short, void *))xpc_notloaded
47948+ .received = (void (*)(short, int, void *))xpc_notloaded,
47949+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
47950 };
47951 EXPORT_SYMBOL_GPL(xpc_interface);
47952
47953diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
47954index b94d5f7..7f494c5 100644
47955--- a/drivers/misc/sgi-xp/xpc.h
47956+++ b/drivers/misc/sgi-xp/xpc.h
47957@@ -835,6 +835,7 @@ struct xpc_arch_operations {
47958 void (*received_payload) (struct xpc_channel *, void *);
47959 void (*notify_senders_of_disconnect) (struct xpc_channel *);
47960 };
47961+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
47962
47963 /* struct xpc_partition act_state values (for XPC HB) */
47964
47965@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
47966 /* found in xpc_main.c */
47967 extern struct device *xpc_part;
47968 extern struct device *xpc_chan;
47969-extern struct xpc_arch_operations xpc_arch_ops;
47970+extern xpc_arch_operations_no_const xpc_arch_ops;
47971 extern int xpc_disengage_timelimit;
47972 extern int xpc_disengage_timedout;
47973 extern int xpc_activate_IRQ_rcvd;
47974diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
47975index 82dc574..8539ab2 100644
47976--- a/drivers/misc/sgi-xp/xpc_main.c
47977+++ b/drivers/misc/sgi-xp/xpc_main.c
47978@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
47979 .notifier_call = xpc_system_die,
47980 };
47981
47982-struct xpc_arch_operations xpc_arch_ops;
47983+xpc_arch_operations_no_const xpc_arch_ops;
47984
47985 /*
47986 * Timer function to enforce the timelimit on the partition disengage.
47987@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
47988
47989 if (((die_args->trapnr == X86_TRAP_MF) ||
47990 (die_args->trapnr == X86_TRAP_XF)) &&
47991- !user_mode_vm(die_args->regs))
47992+ !user_mode(die_args->regs))
47993 xpc_die_deactivate();
47994
47995 break;
47996diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
47997index ed2e71a..54c498e 100644
47998--- a/drivers/mmc/card/block.c
47999+++ b/drivers/mmc/card/block.c
48000@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
48001 if (idata->ic.postsleep_min_us)
48002 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
48003
48004- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
48005+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
48006 err = -EFAULT;
48007 goto cmd_rel_host;
48008 }
48009diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
48010index 18c4afe..43be71e 100644
48011--- a/drivers/mmc/host/dw_mmc.h
48012+++ b/drivers/mmc/host/dw_mmc.h
48013@@ -271,5 +271,5 @@ struct dw_mci_drv_data {
48014 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
48015 int (*parse_dt)(struct dw_mci *host);
48016 int (*execute_tuning)(struct dw_mci_slot *slot);
48017-};
48018+} __do_const;
48019 #endif /* _DW_MMC_H_ */
48020diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
48021index 7fe1619..ae0781b 100644
48022--- a/drivers/mmc/host/mmci.c
48023+++ b/drivers/mmc/host/mmci.c
48024@@ -1630,7 +1630,9 @@ static int mmci_probe(struct amba_device *dev,
48025 mmc->caps |= MMC_CAP_CMD23;
48026
48027 if (variant->busy_detect) {
48028- mmci_ops.card_busy = mmci_card_busy;
48029+ pax_open_kernel();
48030+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
48031+ pax_close_kernel();
48032 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
48033 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
48034 mmc->max_busy_timeout = 0;
48035diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
48036index f84cfb0..aebe5d6 100644
48037--- a/drivers/mmc/host/omap_hsmmc.c
48038+++ b/drivers/mmc/host/omap_hsmmc.c
48039@@ -2054,7 +2054,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
48040
48041 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
48042 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
48043- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
48044+ pax_open_kernel();
48045+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
48046+ pax_close_kernel();
48047 }
48048
48049 pm_runtime_enable(host->dev);
48050diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
48051index 10ef824..88461a2 100644
48052--- a/drivers/mmc/host/sdhci-esdhc-imx.c
48053+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
48054@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
48055 host->mmc->caps |= MMC_CAP_1_8V_DDR;
48056 }
48057
48058- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
48059- sdhci_esdhc_ops.platform_execute_tuning =
48060+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
48061+ pax_open_kernel();
48062+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
48063 esdhc_executing_tuning;
48064+ pax_close_kernel();
48065+ }
48066
48067 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
48068 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
48069diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
48070index c6d2dd7..81b1ca3 100644
48071--- a/drivers/mmc/host/sdhci-s3c.c
48072+++ b/drivers/mmc/host/sdhci-s3c.c
48073@@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
48074 * we can use overriding functions instead of default.
48075 */
48076 if (sc->no_divider) {
48077- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48078- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48079- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48080+ pax_open_kernel();
48081+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48082+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48083+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48084+ pax_close_kernel();
48085 }
48086
48087 /* It supports additional host capabilities if needed */
48088diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
48089index 423666b..81ff5eb 100644
48090--- a/drivers/mtd/chips/cfi_cmdset_0020.c
48091+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
48092@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
48093 size_t totlen = 0, thislen;
48094 int ret = 0;
48095 size_t buflen = 0;
48096- static char *buffer;
48097+ char *buffer;
48098
48099 if (!ECCBUF_SIZE) {
48100 /* We should fall back to a general writev implementation.
48101diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
48102index f44c606..aa4e804 100644
48103--- a/drivers/mtd/nand/denali.c
48104+++ b/drivers/mtd/nand/denali.c
48105@@ -24,6 +24,7 @@
48106 #include <linux/slab.h>
48107 #include <linux/mtd/mtd.h>
48108 #include <linux/module.h>
48109+#include <linux/slab.h>
48110
48111 #include "denali.h"
48112
48113diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48114index 33f3c3c..d6bbe6a 100644
48115--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48116+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48117@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
48118
48119 /* first try to map the upper buffer directly */
48120 if (virt_addr_valid(this->upper_buf) &&
48121- !object_is_on_stack(this->upper_buf)) {
48122+ !object_starts_on_stack(this->upper_buf)) {
48123 sg_init_one(sgl, this->upper_buf, this->upper_len);
48124 ret = dma_map_sg(this->dev, sgl, 1, dr);
48125 if (ret == 0)
48126diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
48127index a5dfbfb..8042ab4 100644
48128--- a/drivers/mtd/nftlmount.c
48129+++ b/drivers/mtd/nftlmount.c
48130@@ -24,6 +24,7 @@
48131 #include <asm/errno.h>
48132 #include <linux/delay.h>
48133 #include <linux/slab.h>
48134+#include <linux/sched.h>
48135 #include <linux/mtd/mtd.h>
48136 #include <linux/mtd/nand.h>
48137 #include <linux/mtd/nftl.h>
48138diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
48139index c23184a..4115c41 100644
48140--- a/drivers/mtd/sm_ftl.c
48141+++ b/drivers/mtd/sm_ftl.c
48142@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
48143 #define SM_CIS_VENDOR_OFFSET 0x59
48144 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
48145 {
48146- struct attribute_group *attr_group;
48147+ attribute_group_no_const *attr_group;
48148 struct attribute **attributes;
48149 struct sm_sysfs_attribute *vendor_attribute;
48150 char *vendor;
48151diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
48152index 7b11243..b3278a3 100644
48153--- a/drivers/net/bonding/bond_netlink.c
48154+++ b/drivers/net/bonding/bond_netlink.c
48155@@ -585,7 +585,7 @@ nla_put_failure:
48156 return -EMSGSIZE;
48157 }
48158
48159-struct rtnl_link_ops bond_link_ops __read_mostly = {
48160+struct rtnl_link_ops bond_link_ops = {
48161 .kind = "bond",
48162 .priv_size = sizeof(struct bonding),
48163 .setup = bond_setup,
48164diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
48165index b3b922a..80bba38 100644
48166--- a/drivers/net/caif/caif_hsi.c
48167+++ b/drivers/net/caif/caif_hsi.c
48168@@ -1444,7 +1444,7 @@ err:
48169 return -ENODEV;
48170 }
48171
48172-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
48173+static struct rtnl_link_ops caif_hsi_link_ops = {
48174 .kind = "cfhsi",
48175 .priv_size = sizeof(struct cfhsi),
48176 .setup = cfhsi_setup,
48177diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
48178index 58808f65..0bdc7b3 100644
48179--- a/drivers/net/can/Kconfig
48180+++ b/drivers/net/can/Kconfig
48181@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
48182
48183 config CAN_FLEXCAN
48184 tristate "Support for Freescale FLEXCAN based chips"
48185- depends on ARM || PPC
48186+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
48187 ---help---
48188 Say Y here if you want to support for Freescale FlexCAN.
48189
48190diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
48191index b0f6924..59e9640 100644
48192--- a/drivers/net/can/dev.c
48193+++ b/drivers/net/can/dev.c
48194@@ -959,7 +959,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
48195 return -EOPNOTSUPP;
48196 }
48197
48198-static struct rtnl_link_ops can_link_ops __read_mostly = {
48199+static struct rtnl_link_ops can_link_ops = {
48200 .kind = "can",
48201 .maxtype = IFLA_CAN_MAX,
48202 .policy = can_policy,
48203diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
48204index 674f367..ec3a31f 100644
48205--- a/drivers/net/can/vcan.c
48206+++ b/drivers/net/can/vcan.c
48207@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
48208 dev->destructor = free_netdev;
48209 }
48210
48211-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
48212+static struct rtnl_link_ops vcan_link_ops = {
48213 .kind = "vcan",
48214 .setup = vcan_setup,
48215 };
48216diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
48217index 49adbf1..fff7ff8 100644
48218--- a/drivers/net/dummy.c
48219+++ b/drivers/net/dummy.c
48220@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
48221 return 0;
48222 }
48223
48224-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
48225+static struct rtnl_link_ops dummy_link_ops = {
48226 .kind = DRV_NAME,
48227 .setup = dummy_setup,
48228 .validate = dummy_validate,
48229diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
48230index 0443654..4f0aa18 100644
48231--- a/drivers/net/ethernet/8390/ax88796.c
48232+++ b/drivers/net/ethernet/8390/ax88796.c
48233@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
48234 if (ax->plat->reg_offsets)
48235 ei_local->reg_offset = ax->plat->reg_offsets;
48236 else {
48237+ resource_size_t _mem_size = mem_size;
48238+ do_div(_mem_size, 0x18);
48239 ei_local->reg_offset = ax->reg_offsets;
48240 for (ret = 0; ret < 0x18; ret++)
48241- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
48242+ ax->reg_offsets[ret] = _mem_size * ret;
48243 }
48244
48245 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
48246diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48247index 6725dc0..163549c 100644
48248--- a/drivers/net/ethernet/altera/altera_tse_main.c
48249+++ b/drivers/net/ethernet/altera/altera_tse_main.c
48250@@ -1216,7 +1216,7 @@ static int tse_shutdown(struct net_device *dev)
48251 return 0;
48252 }
48253
48254-static struct net_device_ops altera_tse_netdev_ops = {
48255+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48256 .ndo_open = tse_open,
48257 .ndo_stop = tse_shutdown,
48258 .ndo_start_xmit = tse_start_xmit,
48259@@ -1453,11 +1453,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48260 ndev->netdev_ops = &altera_tse_netdev_ops;
48261 altera_tse_set_ethtool_ops(ndev);
48262
48263+ pax_open_kernel();
48264 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48265
48266 if (priv->hash_filter)
48267 altera_tse_netdev_ops.ndo_set_rx_mode =
48268 tse_set_rx_mode_hashfilter;
48269+ pax_close_kernel();
48270
48271 /* Scatter/gather IO is not supported,
48272 * so it is turned off
48273diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48274index 29a0927..5a348e24 100644
48275--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48276+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48277@@ -1122,14 +1122,14 @@ do { \
48278 * operations, everything works on mask values.
48279 */
48280 #define XMDIO_READ(_pdata, _mmd, _reg) \
48281- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48282+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48283 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48284
48285 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48286 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48287
48288 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48289- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48290+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48291 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48292
48293 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48294diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48295index 8a50b01..39c1ad0 100644
48296--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48297+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48298@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
48299
48300 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
48301
48302- pdata->hw_if.config_dcb_tc(pdata);
48303+ pdata->hw_if->config_dcb_tc(pdata);
48304
48305 return 0;
48306 }
48307@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
48308
48309 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
48310
48311- pdata->hw_if.config_dcb_pfc(pdata);
48312+ pdata->hw_if->config_dcb_pfc(pdata);
48313
48314 return 0;
48315 }
48316diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48317index d81fc6b..6f8ab25 100644
48318--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48319+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48320@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
48321
48322 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48323 {
48324- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48325+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48326 struct xgbe_channel *channel;
48327 struct xgbe_ring *ring;
48328 struct xgbe_ring_data *rdata;
48329@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48330
48331 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48332 {
48333- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48334+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48335 struct xgbe_channel *channel;
48336 struct xgbe_ring *ring;
48337 struct xgbe_ring_desc *rdesc;
48338@@ -620,17 +620,12 @@ err_out:
48339 return 0;
48340 }
48341
48342-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48343-{
48344- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48345-
48346- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48347- desc_if->free_ring_resources = xgbe_free_ring_resources;
48348- desc_if->map_tx_skb = xgbe_map_tx_skb;
48349- desc_if->map_rx_buffer = xgbe_map_rx_buffer;
48350- desc_if->unmap_rdata = xgbe_unmap_rdata;
48351- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48352- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48353-
48354- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48355-}
48356+const struct xgbe_desc_if default_xgbe_desc_if = {
48357+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48358+ .free_ring_resources = xgbe_free_ring_resources,
48359+ .map_tx_skb = xgbe_map_tx_skb,
48360+ .map_rx_buffer = xgbe_map_rx_buffer,
48361+ .unmap_rdata = xgbe_unmap_rdata,
48362+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48363+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48364+};
48365diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48366index 400757b..d8c53f6 100644
48367--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48368+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48369@@ -2748,7 +2748,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48370
48371 static int xgbe_init(struct xgbe_prv_data *pdata)
48372 {
48373- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48374+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48375 int ret;
48376
48377 DBGPR("-->xgbe_init\n");
48378@@ -2813,108 +2813,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48379 return 0;
48380 }
48381
48382-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48383-{
48384- DBGPR("-->xgbe_init_function_ptrs\n");
48385-
48386- hw_if->tx_complete = xgbe_tx_complete;
48387-
48388- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48389- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48390- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
48391- hw_if->set_mac_address = xgbe_set_mac_address;
48392-
48393- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48394- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48395-
48396- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48397- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48398- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
48399- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
48400- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
48401-
48402- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48403- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48404-
48405- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48406- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48407- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48408-
48409- hw_if->enable_tx = xgbe_enable_tx;
48410- hw_if->disable_tx = xgbe_disable_tx;
48411- hw_if->enable_rx = xgbe_enable_rx;
48412- hw_if->disable_rx = xgbe_disable_rx;
48413-
48414- hw_if->powerup_tx = xgbe_powerup_tx;
48415- hw_if->powerdown_tx = xgbe_powerdown_tx;
48416- hw_if->powerup_rx = xgbe_powerup_rx;
48417- hw_if->powerdown_rx = xgbe_powerdown_rx;
48418-
48419- hw_if->dev_xmit = xgbe_dev_xmit;
48420- hw_if->dev_read = xgbe_dev_read;
48421- hw_if->enable_int = xgbe_enable_int;
48422- hw_if->disable_int = xgbe_disable_int;
48423- hw_if->init = xgbe_init;
48424- hw_if->exit = xgbe_exit;
48425+const struct xgbe_hw_if default_xgbe_hw_if = {
48426+ .tx_complete = xgbe_tx_complete,
48427+
48428+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48429+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48430+ .add_mac_addresses = xgbe_add_mac_addresses,
48431+ .set_mac_address = xgbe_set_mac_address,
48432+
48433+ .enable_rx_csum = xgbe_enable_rx_csum,
48434+ .disable_rx_csum = xgbe_disable_rx_csum,
48435+
48436+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48437+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48438+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
48439+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
48440+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
48441+
48442+ .read_mmd_regs = xgbe_read_mmd_regs,
48443+ .write_mmd_regs = xgbe_write_mmd_regs,
48444+
48445+ .set_gmii_speed = xgbe_set_gmii_speed,
48446+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48447+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48448+
48449+ .enable_tx = xgbe_enable_tx,
48450+ .disable_tx = xgbe_disable_tx,
48451+ .enable_rx = xgbe_enable_rx,
48452+ .disable_rx = xgbe_disable_rx,
48453+
48454+ .powerup_tx = xgbe_powerup_tx,
48455+ .powerdown_tx = xgbe_powerdown_tx,
48456+ .powerup_rx = xgbe_powerup_rx,
48457+ .powerdown_rx = xgbe_powerdown_rx,
48458+
48459+ .dev_xmit = xgbe_dev_xmit,
48460+ .dev_read = xgbe_dev_read,
48461+ .enable_int = xgbe_enable_int,
48462+ .disable_int = xgbe_disable_int,
48463+ .init = xgbe_init,
48464+ .exit = xgbe_exit,
48465
48466 /* Descriptor related Sequences have to be initialized here */
48467- hw_if->tx_desc_init = xgbe_tx_desc_init;
48468- hw_if->rx_desc_init = xgbe_rx_desc_init;
48469- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48470- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48471- hw_if->is_last_desc = xgbe_is_last_desc;
48472- hw_if->is_context_desc = xgbe_is_context_desc;
48473- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
48474+ .tx_desc_init = xgbe_tx_desc_init,
48475+ .rx_desc_init = xgbe_rx_desc_init,
48476+ .tx_desc_reset = xgbe_tx_desc_reset,
48477+ .rx_desc_reset = xgbe_rx_desc_reset,
48478+ .is_last_desc = xgbe_is_last_desc,
48479+ .is_context_desc = xgbe_is_context_desc,
48480+ .tx_start_xmit = xgbe_tx_start_xmit,
48481
48482 /* For FLOW ctrl */
48483- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48484- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48485+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48486+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48487
48488 /* For RX coalescing */
48489- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48490- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48491- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48492- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48493+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48494+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48495+ .usec_to_riwt = xgbe_usec_to_riwt,
48496+ .riwt_to_usec = xgbe_riwt_to_usec,
48497
48498 /* For RX and TX threshold config */
48499- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48500- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48501+ .config_rx_threshold = xgbe_config_rx_threshold,
48502+ .config_tx_threshold = xgbe_config_tx_threshold,
48503
48504 /* For RX and TX Store and Forward Mode config */
48505- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48506- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48507+ .config_rsf_mode = xgbe_config_rsf_mode,
48508+ .config_tsf_mode = xgbe_config_tsf_mode,
48509
48510 /* For TX DMA Operating on Second Frame config */
48511- hw_if->config_osp_mode = xgbe_config_osp_mode;
48512+ .config_osp_mode = xgbe_config_osp_mode,
48513
48514 /* For RX and TX PBL config */
48515- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48516- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48517- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48518- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48519- hw_if->config_pblx8 = xgbe_config_pblx8;
48520+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48521+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48522+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48523+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48524+ .config_pblx8 = xgbe_config_pblx8,
48525
48526 /* For MMC statistics support */
48527- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48528- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48529- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48530+ .tx_mmc_int = xgbe_tx_mmc_int,
48531+ .rx_mmc_int = xgbe_rx_mmc_int,
48532+ .read_mmc_stats = xgbe_read_mmc_stats,
48533
48534 /* For PTP config */
48535- hw_if->config_tstamp = xgbe_config_tstamp;
48536- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
48537- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
48538- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
48539- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
48540+ .config_tstamp = xgbe_config_tstamp,
48541+ .update_tstamp_addend = xgbe_update_tstamp_addend,
48542+ .set_tstamp_time = xgbe_set_tstamp_time,
48543+ .get_tstamp_time = xgbe_get_tstamp_time,
48544+ .get_tx_tstamp = xgbe_get_tx_tstamp,
48545
48546 /* For Data Center Bridging config */
48547- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
48548- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
48549+ .config_dcb_tc = xgbe_config_dcb_tc,
48550+ .config_dcb_pfc = xgbe_config_dcb_pfc,
48551
48552 /* For Receive Side Scaling */
48553- hw_if->enable_rss = xgbe_enable_rss;
48554- hw_if->disable_rss = xgbe_disable_rss;
48555- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
48556- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
48557-
48558- DBGPR("<--xgbe_init_function_ptrs\n");
48559-}
48560+ .enable_rss = xgbe_enable_rss,
48561+ .disable_rss = xgbe_disable_rss,
48562+ .set_rss_hash_key = xgbe_set_rss_hash_key,
48563+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
48564+};
48565diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48566index 885b02b..4b31a4c 100644
48567--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48568+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48569@@ -244,7 +244,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
48570 * support, tell it now
48571 */
48572 if (ring->tx.xmit_more)
48573- pdata->hw_if.tx_start_xmit(channel, ring);
48574+ pdata->hw_if->tx_start_xmit(channel, ring);
48575
48576 return NETDEV_TX_BUSY;
48577 }
48578@@ -272,7 +272,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48579
48580 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48581 {
48582- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48583+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48584 struct xgbe_channel *channel;
48585 enum xgbe_int int_id;
48586 unsigned int i;
48587@@ -294,7 +294,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48588
48589 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48590 {
48591- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48592+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48593 struct xgbe_channel *channel;
48594 enum xgbe_int int_id;
48595 unsigned int i;
48596@@ -317,7 +317,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48597 static irqreturn_t xgbe_isr(int irq, void *data)
48598 {
48599 struct xgbe_prv_data *pdata = data;
48600- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48601+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48602 struct xgbe_channel *channel;
48603 unsigned int dma_isr, dma_ch_isr;
48604 unsigned int mac_isr, mac_tssr;
48605@@ -673,7 +673,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
48606
48607 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48608 {
48609- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48610+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48611
48612 DBGPR("-->xgbe_init_tx_coalesce\n");
48613
48614@@ -687,7 +687,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48615
48616 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48617 {
48618- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48619+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48620
48621 DBGPR("-->xgbe_init_rx_coalesce\n");
48622
48623@@ -701,7 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48624
48625 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48626 {
48627- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48628+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48629 struct xgbe_channel *channel;
48630 struct xgbe_ring *ring;
48631 struct xgbe_ring_data *rdata;
48632@@ -726,7 +726,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48633
48634 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48635 {
48636- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48637+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48638 struct xgbe_channel *channel;
48639 struct xgbe_ring *ring;
48640 struct xgbe_ring_data *rdata;
48641@@ -752,7 +752,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48642 static void xgbe_adjust_link(struct net_device *netdev)
48643 {
48644 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48645- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48646+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48647 struct phy_device *phydev = pdata->phydev;
48648 int new_state = 0;
48649
48650@@ -860,7 +860,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
48651 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48652 {
48653 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48654- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48655+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48656 unsigned long flags;
48657
48658 DBGPR("-->xgbe_powerdown\n");
48659@@ -898,7 +898,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48660 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48661 {
48662 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48663- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48664+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48665 unsigned long flags;
48666
48667 DBGPR("-->xgbe_powerup\n");
48668@@ -935,7 +935,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48669
48670 static int xgbe_start(struct xgbe_prv_data *pdata)
48671 {
48672- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48673+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48674 struct net_device *netdev = pdata->netdev;
48675 int ret;
48676
48677@@ -976,7 +976,7 @@ err_napi:
48678
48679 static void xgbe_stop(struct xgbe_prv_data *pdata)
48680 {
48681- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48682+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48683 struct xgbe_channel *channel;
48684 struct net_device *netdev = pdata->netdev;
48685 struct netdev_queue *txq;
48686@@ -1203,7 +1203,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48687 return -ERANGE;
48688 }
48689
48690- pdata->hw_if.config_tstamp(pdata, mac_tscr);
48691+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
48692
48693 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48694
48695@@ -1352,7 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48696 static int xgbe_open(struct net_device *netdev)
48697 {
48698 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48699- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48700+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48701 int ret;
48702
48703 DBGPR("-->xgbe_open\n");
48704@@ -1424,7 +1424,7 @@ err_phy_init:
48705 static int xgbe_close(struct net_device *netdev)
48706 {
48707 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48708- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48709+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48710
48711 DBGPR("-->xgbe_close\n");
48712
48713@@ -1452,8 +1452,8 @@ static int xgbe_close(struct net_device *netdev)
48714 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48715 {
48716 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48717- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48718- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48719+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48720+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48721 struct xgbe_channel *channel;
48722 struct xgbe_ring *ring;
48723 struct xgbe_packet_data *packet;
48724@@ -1521,7 +1521,7 @@ tx_netdev_return:
48725 static void xgbe_set_rx_mode(struct net_device *netdev)
48726 {
48727 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48728- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48729+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48730 unsigned int pr_mode, am_mode;
48731
48732 DBGPR("-->xgbe_set_rx_mode\n");
48733@@ -1540,7 +1540,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48734 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48735 {
48736 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48737- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48738+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48739 struct sockaddr *saddr = addr;
48740
48741 DBGPR("-->xgbe_set_mac_address\n");
48742@@ -1607,7 +1607,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48743
48744 DBGPR("-->%s\n", __func__);
48745
48746- pdata->hw_if.read_mmc_stats(pdata);
48747+ pdata->hw_if->read_mmc_stats(pdata);
48748
48749 s->rx_packets = pstats->rxframecount_gb;
48750 s->rx_bytes = pstats->rxoctetcount_gb;
48751@@ -1634,7 +1634,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48752 u16 vid)
48753 {
48754 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48755- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48756+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48757
48758 DBGPR("-->%s\n", __func__);
48759
48760@@ -1650,7 +1650,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48761 u16 vid)
48762 {
48763 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48764- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48765+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48766
48767 DBGPR("-->%s\n", __func__);
48768
48769@@ -1716,7 +1716,7 @@ static int xgbe_set_features(struct net_device *netdev,
48770 netdev_features_t features)
48771 {
48772 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48773- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48774+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48775 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
48776 int ret = 0;
48777
48778@@ -1781,8 +1781,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48779 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48780 {
48781 struct xgbe_prv_data *pdata = channel->pdata;
48782- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48783- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48784+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48785+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48786 struct xgbe_ring *ring = channel->rx_ring;
48787 struct xgbe_ring_data *rdata;
48788
48789@@ -1835,8 +1835,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
48790 static int xgbe_tx_poll(struct xgbe_channel *channel)
48791 {
48792 struct xgbe_prv_data *pdata = channel->pdata;
48793- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48794- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48795+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48796+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48797 struct xgbe_ring *ring = channel->tx_ring;
48798 struct xgbe_ring_data *rdata;
48799 struct xgbe_ring_desc *rdesc;
48800@@ -1901,7 +1901,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48801 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48802 {
48803 struct xgbe_prv_data *pdata = channel->pdata;
48804- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48805+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48806 struct xgbe_ring *ring = channel->rx_ring;
48807 struct xgbe_ring_data *rdata;
48808 struct xgbe_packet_data *packet;
48809diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48810index ebf4893..a8f51c6 100644
48811--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48812+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48813@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48814
48815 DBGPR("-->%s\n", __func__);
48816
48817- pdata->hw_if.read_mmc_stats(pdata);
48818+ pdata->hw_if->read_mmc_stats(pdata);
48819 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48820 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48821 *data++ = *(u64 *)stat;
48822@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48823 struct ethtool_coalesce *ec)
48824 {
48825 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48826- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48827+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48828 unsigned int riwt;
48829
48830 DBGPR("-->xgbe_get_coalesce\n");
48831@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48832 struct ethtool_coalesce *ec)
48833 {
48834 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48835- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48836+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48837 unsigned int rx_frames, rx_riwt, rx_usecs;
48838 unsigned int tx_frames, tx_usecs;
48839
48840@@ -536,7 +536,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
48841 const u8 *key, const u8 hfunc)
48842 {
48843 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48844- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48845+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48846 unsigned int ret;
48847
48848 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
48849diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48850index 32dd651..225cca3 100644
48851--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48852+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48853@@ -159,12 +159,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48854 DBGPR("<--xgbe_default_config\n");
48855 }
48856
48857-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48858-{
48859- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48860- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48861-}
48862-
48863 #ifdef CONFIG_ACPI
48864 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
48865 {
48866@@ -396,9 +390,8 @@ static int xgbe_probe(struct platform_device *pdev)
48867 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
48868
48869 /* Set all the function pointers */
48870- xgbe_init_all_fptrs(pdata);
48871- hw_if = &pdata->hw_if;
48872- desc_if = &pdata->desc_if;
48873+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48874+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48875
48876 /* Issue software reset to device */
48877 hw_if->exit(pdata);
48878diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48879index 59e267f..0842a88 100644
48880--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48881+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48882@@ -126,7 +126,7 @@
48883 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48884 {
48885 struct xgbe_prv_data *pdata = mii->priv;
48886- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48887+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48888 int mmd_data;
48889
48890 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48891@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48892 u16 mmd_val)
48893 {
48894 struct xgbe_prv_data *pdata = mii->priv;
48895- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48896+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48897 int mmd_data = mmd_val;
48898
48899 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48900diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48901index f326178..8bd7daf 100644
48902--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48903+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48904@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48905 tstamp_cc);
48906 u64 nsec;
48907
48908- nsec = pdata->hw_if.get_tstamp_time(pdata);
48909+ nsec = pdata->hw_if->get_tstamp_time(pdata);
48910
48911 return nsec;
48912 }
48913@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48914
48915 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48916
48917- pdata->hw_if.update_tstamp_addend(pdata, addend);
48918+ pdata->hw_if->update_tstamp_addend(pdata, addend);
48919
48920 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48921
48922diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48923index 13e8f95..1d8beef 100644
48924--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48925+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48926@@ -675,8 +675,8 @@ struct xgbe_prv_data {
48927 int dev_irq;
48928 unsigned int per_channel_irq;
48929
48930- struct xgbe_hw_if hw_if;
48931- struct xgbe_desc_if desc_if;
48932+ struct xgbe_hw_if *hw_if;
48933+ struct xgbe_desc_if *desc_if;
48934
48935 /* AXI DMA settings */
48936 unsigned int coherent;
48937@@ -798,6 +798,9 @@ struct xgbe_prv_data {
48938 #endif
48939 };
48940
48941+extern const struct xgbe_hw_if default_xgbe_hw_if;
48942+extern const struct xgbe_desc_if default_xgbe_desc_if;
48943+
48944 /* Function prototypes*/
48945
48946 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
48947diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48948index adcacda..fa6e0ae 100644
48949--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48950+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48951@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
48952 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
48953 {
48954 /* RX_MODE controlling object */
48955- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
48956+ bnx2x_init_rx_mode_obj(bp);
48957
48958 /* multicast configuration controlling object */
48959 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
48960diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48961index 07cdf9b..b08ecc7 100644
48962--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48963+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48964@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
48965 return rc;
48966 }
48967
48968-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48969- struct bnx2x_rx_mode_obj *o)
48970+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
48971 {
48972 if (CHIP_IS_E1x(bp)) {
48973- o->wait_comp = bnx2x_empty_rx_mode_wait;
48974- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
48975+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
48976+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
48977 } else {
48978- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
48979- o->config_rx_mode = bnx2x_set_rx_mode_e2;
48980+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
48981+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
48982 }
48983 }
48984
48985diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48986index 86baecb..ff3bb46 100644
48987--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48988+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48989@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
48990
48991 /********************* RX MODE ****************/
48992
48993-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48994- struct bnx2x_rx_mode_obj *o);
48995+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
48996
48997 /**
48998 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
48999diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
49000index 31c9f82..e65e986 100644
49001--- a/drivers/net/ethernet/broadcom/tg3.h
49002+++ b/drivers/net/ethernet/broadcom/tg3.h
49003@@ -150,6 +150,7 @@
49004 #define CHIPREV_ID_5750_A0 0x4000
49005 #define CHIPREV_ID_5750_A1 0x4001
49006 #define CHIPREV_ID_5750_A3 0x4003
49007+#define CHIPREV_ID_5750_C1 0x4201
49008 #define CHIPREV_ID_5750_C2 0x4202
49009 #define CHIPREV_ID_5752_A0_HW 0x5000
49010 #define CHIPREV_ID_5752_A0 0x6000
49011diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
49012index 903466e..b285864 100644
49013--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
49014+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
49015@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
49016 }
49017
49018 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
49019- bna_cb_ioceth_enable,
49020- bna_cb_ioceth_disable,
49021- bna_cb_ioceth_hbfail,
49022- bna_cb_ioceth_reset
49023+ .enable_cbfn = bna_cb_ioceth_enable,
49024+ .disable_cbfn = bna_cb_ioceth_disable,
49025+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
49026+ .reset_cbfn = bna_cb_ioceth_reset
49027 };
49028
49029 static void bna_attr_init(struct bna_ioceth *ioceth)
49030diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49031index 8cffcdf..aadf043 100644
49032--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49033+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49034@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
49035 */
49036 struct l2t_skb_cb {
49037 arp_failure_handler_func arp_failure_handler;
49038-};
49039+} __no_const;
49040
49041 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
49042
49043diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49044index d929951..a2c23f5 100644
49045--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49046+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49047@@ -2215,7 +2215,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
49048
49049 int i;
49050 struct adapter *ap = netdev2adap(dev);
49051- static const unsigned int *reg_ranges;
49052+ const unsigned int *reg_ranges;
49053 int arr_size = 0, buf_size = 0;
49054
49055 if (is_t4(ap->params.chip)) {
49056diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
49057index badff18..e15c4ec 100644
49058--- a/drivers/net/ethernet/dec/tulip/de4x5.c
49059+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
49060@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49061 for (i=0; i<ETH_ALEN; i++) {
49062 tmp.addr[i] = dev->dev_addr[i];
49063 }
49064- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49065+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49066 break;
49067
49068 case DE4X5_SET_HWADDR: /* Set the hardware address */
49069@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49070 spin_lock_irqsave(&lp->lock, flags);
49071 memcpy(&statbuf, &lp->pktStats, ioc->len);
49072 spin_unlock_irqrestore(&lp->lock, flags);
49073- if (copy_to_user(ioc->data, &statbuf, ioc->len))
49074+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
49075 return -EFAULT;
49076 break;
49077 }
49078diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
49079index e6b790f..051ba2d 100644
49080--- a/drivers/net/ethernet/emulex/benet/be_main.c
49081+++ b/drivers/net/ethernet/emulex/benet/be_main.c
49082@@ -536,7 +536,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
49083
49084 if (wrapped)
49085 newacc += 65536;
49086- ACCESS_ONCE(*acc) = newacc;
49087+ ACCESS_ONCE_RW(*acc) = newacc;
49088 }
49089
49090 static void populate_erx_stats(struct be_adapter *adapter,
49091diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
49092index 6d0c5d5..55be363 100644
49093--- a/drivers/net/ethernet/faraday/ftgmac100.c
49094+++ b/drivers/net/ethernet/faraday/ftgmac100.c
49095@@ -30,6 +30,8 @@
49096 #include <linux/netdevice.h>
49097 #include <linux/phy.h>
49098 #include <linux/platform_device.h>
49099+#include <linux/interrupt.h>
49100+#include <linux/irqreturn.h>
49101 #include <net/ip.h>
49102
49103 #include "ftgmac100.h"
49104diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
49105index dce5f7b..2433466 100644
49106--- a/drivers/net/ethernet/faraday/ftmac100.c
49107+++ b/drivers/net/ethernet/faraday/ftmac100.c
49108@@ -31,6 +31,8 @@
49109 #include <linux/module.h>
49110 #include <linux/netdevice.h>
49111 #include <linux/platform_device.h>
49112+#include <linux/interrupt.h>
49113+#include <linux/irqreturn.h>
49114
49115 #include "ftmac100.h"
49116
49117diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49118index fabcfa1..188fd22 100644
49119--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49120+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49121@@ -419,7 +419,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
49122 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
49123
49124 /* Update the base adjustement value. */
49125- ACCESS_ONCE(pf->ptp_base_adj) = incval;
49126+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
49127 smp_mb(); /* Force the above update. */
49128 }
49129
49130diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49131index 79c00f5..8da39f6 100644
49132--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49133+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49134@@ -785,7 +785,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
49135 }
49136
49137 /* update the base incval used to calculate frequency adjustment */
49138- ACCESS_ONCE(adapter->base_incval) = incval;
49139+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
49140 smp_mb();
49141
49142 /* need lock to prevent incorrect read while modifying cyclecounter */
49143diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
49144index 8c234ec..757331f 100644
49145--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
49146+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
49147@@ -468,8 +468,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
49148 wmb();
49149
49150 /* we want to dirty this cache line once */
49151- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
49152- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
49153+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
49154+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
49155
49156 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
49157
49158diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49159index 6223930..975033d 100644
49160--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
49161+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49162@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49163 struct __vxge_hw_fifo *fifo;
49164 struct vxge_hw_fifo_config *config;
49165 u32 txdl_size, txdl_per_memblock;
49166- struct vxge_hw_mempool_cbs fifo_mp_callback;
49167+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
49168+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
49169+ };
49170+
49171 struct __vxge_hw_virtualpath *vpath;
49172
49173 if ((vp == NULL) || (attr == NULL)) {
49174@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49175 goto exit;
49176 }
49177
49178- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
49179-
49180 fifo->mempool =
49181 __vxge_hw_mempool_create(vpath->hldev,
49182 fifo->config->memblock_size,
49183diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49184index 2bb48d5..d1a865d 100644
49185--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49186+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49187@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
49188 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
49189 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
49190 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
49191- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49192+ pax_open_kernel();
49193+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49194+ pax_close_kernel();
49195 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49196 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
49197 max_tx_rings = QLCNIC_MAX_TX_RINGS;
49198diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49199index be7d7a6..a8983f8 100644
49200--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49201+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49202@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
49203 case QLCNIC_NON_PRIV_FUNC:
49204 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
49205 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49206- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49207+ pax_open_kernel();
49208+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49209+ pax_close_kernel();
49210 break;
49211 case QLCNIC_PRIV_FUNC:
49212 ahw->op_mode = QLCNIC_PRIV_FUNC;
49213 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
49214- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49215+ pax_open_kernel();
49216+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49217+ pax_close_kernel();
49218 break;
49219 case QLCNIC_MGMT_FUNC:
49220 ahw->op_mode = QLCNIC_MGMT_FUNC;
49221 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49222- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49223+ pax_open_kernel();
49224+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49225+ pax_close_kernel();
49226 break;
49227 default:
49228 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
49229diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49230index 332bb8a..e6adcd1 100644
49231--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49232+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49233@@ -1285,7 +1285,7 @@ flash_temp:
49234 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
49235 {
49236 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
49237- static const struct qlcnic_dump_operations *fw_dump_ops;
49238+ const struct qlcnic_dump_operations *fw_dump_ops;
49239 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
49240 u32 entry_offset, dump, no_entries, buf_offset = 0;
49241 int i, k, ops_cnt, ops_index, dump_size = 0;
49242diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49243index c70ab40..00b28e0 100644
49244--- a/drivers/net/ethernet/realtek/r8169.c
49245+++ b/drivers/net/ethernet/realtek/r8169.c
49246@@ -788,22 +788,22 @@ struct rtl8169_private {
49247 struct mdio_ops {
49248 void (*write)(struct rtl8169_private *, int, int);
49249 int (*read)(struct rtl8169_private *, int);
49250- } mdio_ops;
49251+ } __no_const mdio_ops;
49252
49253 struct pll_power_ops {
49254 void (*down)(struct rtl8169_private *);
49255 void (*up)(struct rtl8169_private *);
49256- } pll_power_ops;
49257+ } __no_const pll_power_ops;
49258
49259 struct jumbo_ops {
49260 void (*enable)(struct rtl8169_private *);
49261 void (*disable)(struct rtl8169_private *);
49262- } jumbo_ops;
49263+ } __no_const jumbo_ops;
49264
49265 struct csi_ops {
49266 void (*write)(struct rtl8169_private *, int, int);
49267 u32 (*read)(struct rtl8169_private *, int);
49268- } csi_ops;
49269+ } __no_const csi_ops;
49270
49271 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49272 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49273diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49274index 6b861e3..204ac86 100644
49275--- a/drivers/net/ethernet/sfc/ptp.c
49276+++ b/drivers/net/ethernet/sfc/ptp.c
49277@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49278 ptp->start.dma_addr);
49279
49280 /* Clear flag that signals MC ready */
49281- ACCESS_ONCE(*start) = 0;
49282+ ACCESS_ONCE_RW(*start) = 0;
49283 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49284 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49285 EFX_BUG_ON_PARANOID(rc);
49286diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
49287index 10b6173..b605dfd5 100644
49288--- a/drivers/net/ethernet/sfc/selftest.c
49289+++ b/drivers/net/ethernet/sfc/selftest.c
49290@@ -46,7 +46,7 @@ struct efx_loopback_payload {
49291 struct iphdr ip;
49292 struct udphdr udp;
49293 __be16 iteration;
49294- const char msg[64];
49295+ char msg[64];
49296 } __packed;
49297
49298 /* Loopback test source MAC address */
49299diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49300index 08c483b..2c4a553 100644
49301--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49302+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49303@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49304
49305 writel(value, ioaddr + MMC_CNTRL);
49306
49307- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49308- MMC_CNTRL, value);
49309+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49310+// MMC_CNTRL, value);
49311 }
49312
49313 /* To mask all all interrupts.*/
49314diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
49315index 17e2766..c332f1e 100644
49316--- a/drivers/net/ethernet/via/via-rhine.c
49317+++ b/drivers/net/ethernet/via/via-rhine.c
49318@@ -2514,7 +2514,7 @@ static struct platform_driver rhine_driver_platform = {
49319 }
49320 };
49321
49322-static struct dmi_system_id rhine_dmi_table[] __initdata = {
49323+static const struct dmi_system_id rhine_dmi_table[] __initconst = {
49324 {
49325 .ident = "EPIA-M",
49326 .matches = {
49327diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49328index 384ca4f..dd7d4f9 100644
49329--- a/drivers/net/hyperv/hyperv_net.h
49330+++ b/drivers/net/hyperv/hyperv_net.h
49331@@ -171,7 +171,7 @@ struct rndis_device {
49332 enum rndis_device_state state;
49333 bool link_state;
49334 bool link_change;
49335- atomic_t new_req_id;
49336+ atomic_unchecked_t new_req_id;
49337
49338 spinlock_t request_lock;
49339 struct list_head req_list;
49340diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49341index 7816d98..7890614 100644
49342--- a/drivers/net/hyperv/rndis_filter.c
49343+++ b/drivers/net/hyperv/rndis_filter.c
49344@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49345 * template
49346 */
49347 set = &rndis_msg->msg.set_req;
49348- set->req_id = atomic_inc_return(&dev->new_req_id);
49349+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49350
49351 /* Add to the request list */
49352 spin_lock_irqsave(&dev->request_lock, flags);
49353@@ -918,7 +918,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49354
49355 /* Setup the rndis set */
49356 halt = &request->request_msg.msg.halt_req;
49357- halt->req_id = atomic_inc_return(&dev->new_req_id);
49358+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49359
49360 /* Ignore return since this msg is optional. */
49361 rndis_filter_send_request(dev, request);
49362diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
49363index 34f846b..4a0d5b1 100644
49364--- a/drivers/net/ifb.c
49365+++ b/drivers/net/ifb.c
49366@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
49367 return 0;
49368 }
49369
49370-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
49371+static struct rtnl_link_ops ifb_link_ops = {
49372 .kind = "ifb",
49373 .priv_size = sizeof(struct ifb_private),
49374 .setup = ifb_setup,
49375diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49376index 1df38bd..4bc20b0 100644
49377--- a/drivers/net/macvlan.c
49378+++ b/drivers/net/macvlan.c
49379@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49380 free_nskb:
49381 kfree_skb(nskb);
49382 err:
49383- atomic_long_inc(&skb->dev->rx_dropped);
49384+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49385 }
49386
49387 static void macvlan_flush_sources(struct macvlan_port *port,
49388@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49389 int macvlan_link_register(struct rtnl_link_ops *ops)
49390 {
49391 /* common fields */
49392- ops->priv_size = sizeof(struct macvlan_dev);
49393- ops->validate = macvlan_validate;
49394- ops->maxtype = IFLA_MACVLAN_MAX;
49395- ops->policy = macvlan_policy;
49396- ops->changelink = macvlan_changelink;
49397- ops->get_size = macvlan_get_size;
49398- ops->fill_info = macvlan_fill_info;
49399+ pax_open_kernel();
49400+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49401+ *(void **)&ops->validate = macvlan_validate;
49402+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49403+ *(const void **)&ops->policy = macvlan_policy;
49404+ *(void **)&ops->changelink = macvlan_changelink;
49405+ *(void **)&ops->get_size = macvlan_get_size;
49406+ *(void **)&ops->fill_info = macvlan_fill_info;
49407+ pax_close_kernel();
49408
49409 return rtnl_link_register(ops);
49410 };
49411@@ -1551,7 +1553,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49412 return NOTIFY_DONE;
49413 }
49414
49415-static struct notifier_block macvlan_notifier_block __read_mostly = {
49416+static struct notifier_block macvlan_notifier_block = {
49417 .notifier_call = macvlan_device_event,
49418 };
49419
49420diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49421index 27ecc5c..f636328 100644
49422--- a/drivers/net/macvtap.c
49423+++ b/drivers/net/macvtap.c
49424@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
49425 dev->tx_queue_len = TUN_READQ_SIZE;
49426 }
49427
49428-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
49429+static struct rtnl_link_ops macvtap_link_ops = {
49430 .kind = "macvtap",
49431 .setup = macvtap_setup,
49432 .newlink = macvtap_newlink,
49433@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49434
49435 ret = 0;
49436 u = q->flags;
49437- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49438+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49439 put_user(u, &ifr->ifr_flags))
49440 ret = -EFAULT;
49441 macvtap_put_vlan(vlan);
49442@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49443 return NOTIFY_DONE;
49444 }
49445
49446-static struct notifier_block macvtap_notifier_block __read_mostly = {
49447+static struct notifier_block macvtap_notifier_block = {
49448 .notifier_call = macvtap_device_event,
49449 };
49450
49451diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
49452index 34924df..a747360 100644
49453--- a/drivers/net/nlmon.c
49454+++ b/drivers/net/nlmon.c
49455@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
49456 return 0;
49457 }
49458
49459-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
49460+static struct rtnl_link_ops nlmon_link_ops = {
49461 .kind = "nlmon",
49462 .priv_size = sizeof(struct nlmon),
49463 .setup = nlmon_setup,
49464diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
49465index bdfe51f..e7845c7 100644
49466--- a/drivers/net/phy/phy_device.c
49467+++ b/drivers/net/phy/phy_device.c
49468@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
49469 * zero on success.
49470 *
49471 */
49472-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49473+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
49474 struct phy_c45_device_ids *c45_ids) {
49475 int phy_reg;
49476 int i, reg_addr;
49477@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49478 * its return value is in turn returned.
49479 *
49480 */
49481-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49482+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
49483 bool is_c45, struct phy_c45_device_ids *c45_ids)
49484 {
49485 int phy_reg;
49486@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49487 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
49488 {
49489 struct phy_c45_device_ids c45_ids = {0};
49490- u32 phy_id = 0;
49491+ int phy_id = 0;
49492 int r;
49493
49494 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
49495diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49496index 9d15566..5ad4ef6 100644
49497--- a/drivers/net/ppp/ppp_generic.c
49498+++ b/drivers/net/ppp/ppp_generic.c
49499@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49500 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
49501 struct ppp_stats stats;
49502 struct ppp_comp_stats cstats;
49503- char *vers;
49504
49505 switch (cmd) {
49506 case SIOCGPPPSTATS:
49507@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49508 break;
49509
49510 case SIOCGPPPVER:
49511- vers = PPP_VERSION;
49512- if (copy_to_user(addr, vers, strlen(vers) + 1))
49513+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
49514 break;
49515 err = 0;
49516 break;
49517diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49518index 079f7ad..b2a2bfa7 100644
49519--- a/drivers/net/slip/slhc.c
49520+++ b/drivers/net/slip/slhc.c
49521@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49522 register struct tcphdr *thp;
49523 register struct iphdr *ip;
49524 register struct cstate *cs;
49525- int len, hdrlen;
49526+ long len, hdrlen;
49527 unsigned char *cp = icp;
49528
49529 /* We've got a compressed packet; read the change byte */
49530diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49531index 7d39484..d58499d 100644
49532--- a/drivers/net/team/team.c
49533+++ b/drivers/net/team/team.c
49534@@ -2099,7 +2099,7 @@ static unsigned int team_get_num_rx_queues(void)
49535 return TEAM_DEFAULT_NUM_RX_QUEUES;
49536 }
49537
49538-static struct rtnl_link_ops team_link_ops __read_mostly = {
49539+static struct rtnl_link_ops team_link_ops = {
49540 .kind = DRV_NAME,
49541 .priv_size = sizeof(struct team),
49542 .setup = team_setup,
49543@@ -2889,7 +2889,7 @@ static int team_device_event(struct notifier_block *unused,
49544 return NOTIFY_DONE;
49545 }
49546
49547-static struct notifier_block team_notifier_block __read_mostly = {
49548+static struct notifier_block team_notifier_block = {
49549 .notifier_call = team_device_event,
49550 };
49551
49552diff --git a/drivers/net/tun.c b/drivers/net/tun.c
49553index 857dca4..642f532 100644
49554--- a/drivers/net/tun.c
49555+++ b/drivers/net/tun.c
49556@@ -1421,7 +1421,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
49557 return -EINVAL;
49558 }
49559
49560-static struct rtnl_link_ops tun_link_ops __read_mostly = {
49561+static struct rtnl_link_ops tun_link_ops = {
49562 .kind = DRV_NAME,
49563 .priv_size = sizeof(struct tun_struct),
49564 .setup = tun_setup,
49565@@ -1830,7 +1830,7 @@ unlock:
49566 }
49567
49568 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49569- unsigned long arg, int ifreq_len)
49570+ unsigned long arg, size_t ifreq_len)
49571 {
49572 struct tun_file *tfile = file->private_data;
49573 struct tun_struct *tun;
49574@@ -1844,6 +1844,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49575 int le;
49576 int ret;
49577
49578+ if (ifreq_len > sizeof ifr)
49579+ return -EFAULT;
49580+
49581 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
49582 if (copy_from_user(&ifr, argp, ifreq_len))
49583 return -EFAULT;
49584diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
49585index 778e915..58c4d95 100644
49586--- a/drivers/net/usb/hso.c
49587+++ b/drivers/net/usb/hso.c
49588@@ -70,7 +70,7 @@
49589 #include <asm/byteorder.h>
49590 #include <linux/serial_core.h>
49591 #include <linux/serial.h>
49592-
49593+#include <asm/local.h>
49594
49595 #define MOD_AUTHOR "Option Wireless"
49596 #define MOD_DESCRIPTION "USB High Speed Option driver"
49597@@ -1183,7 +1183,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
49598 struct urb *urb;
49599
49600 urb = serial->rx_urb[0];
49601- if (serial->port.count > 0) {
49602+ if (atomic_read(&serial->port.count) > 0) {
49603 count = put_rxbuf_data(urb, serial);
49604 if (count == -1)
49605 return;
49606@@ -1221,7 +1221,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
49607 DUMP1(urb->transfer_buffer, urb->actual_length);
49608
49609 /* Anyone listening? */
49610- if (serial->port.count == 0)
49611+ if (atomic_read(&serial->port.count) == 0)
49612 return;
49613
49614 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
49615@@ -1282,8 +1282,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49616 tty_port_tty_set(&serial->port, tty);
49617
49618 /* check for port already opened, if not set the termios */
49619- serial->port.count++;
49620- if (serial->port.count == 1) {
49621+ if (atomic_inc_return(&serial->port.count) == 1) {
49622 serial->rx_state = RX_IDLE;
49623 /* Force default termio settings */
49624 _hso_serial_set_termios(tty, NULL);
49625@@ -1293,7 +1292,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49626 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
49627 if (result) {
49628 hso_stop_serial_device(serial->parent);
49629- serial->port.count--;
49630+ atomic_dec(&serial->port.count);
49631 } else {
49632 kref_get(&serial->parent->ref);
49633 }
49634@@ -1331,10 +1330,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
49635
49636 /* reset the rts and dtr */
49637 /* do the actual close */
49638- serial->port.count--;
49639+ atomic_dec(&serial->port.count);
49640
49641- if (serial->port.count <= 0) {
49642- serial->port.count = 0;
49643+ if (atomic_read(&serial->port.count) <= 0) {
49644+ atomic_set(&serial->port.count, 0);
49645 tty_port_tty_set(&serial->port, NULL);
49646 if (!usb_gone)
49647 hso_stop_serial_device(serial->parent);
49648@@ -1417,7 +1416,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
49649
49650 /* the actual setup */
49651 spin_lock_irqsave(&serial->serial_lock, flags);
49652- if (serial->port.count)
49653+ if (atomic_read(&serial->port.count))
49654 _hso_serial_set_termios(tty, old);
49655 else
49656 tty->termios = *old;
49657@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
49658 D1("Pending read interrupt on port %d\n", i);
49659 spin_lock(&serial->serial_lock);
49660 if (serial->rx_state == RX_IDLE &&
49661- serial->port.count > 0) {
49662+ atomic_read(&serial->port.count) > 0) {
49663 /* Setup and send a ctrl req read on
49664 * port i */
49665 if (!serial->rx_urb_filled[0]) {
49666@@ -3053,7 +3052,7 @@ static int hso_resume(struct usb_interface *iface)
49667 /* Start all serial ports */
49668 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
49669 if (serial_table[i] && (serial_table[i]->interface == iface)) {
49670- if (dev2ser(serial_table[i])->port.count) {
49671+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
49672 result =
49673 hso_start_serial_device(serial_table[i], GFP_NOIO);
49674 hso_kick_transmit(dev2ser(serial_table[i]));
49675diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
49676index 9f7c0ab..1577b4a 100644
49677--- a/drivers/net/usb/r8152.c
49678+++ b/drivers/net/usb/r8152.c
49679@@ -601,7 +601,7 @@ struct r8152 {
49680 void (*unload)(struct r8152 *);
49681 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
49682 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
49683- } rtl_ops;
49684+ } __no_const rtl_ops;
49685
49686 int intr_interval;
49687 u32 saved_wolopts;
49688diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
49689index a2515887..6d13233 100644
49690--- a/drivers/net/usb/sierra_net.c
49691+++ b/drivers/net/usb/sierra_net.c
49692@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
49693 /* atomic counter partially included in MAC address to make sure 2 devices
49694 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
49695 */
49696-static atomic_t iface_counter = ATOMIC_INIT(0);
49697+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
49698
49699 /*
49700 * SYNC Timer Delay definition used to set the expiry time
49701@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
49702 dev->net->netdev_ops = &sierra_net_device_ops;
49703
49704 /* change MAC addr to include, ifacenum, and to be unique */
49705- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
49706+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
49707 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
49708
49709 /* we will have to manufacture ethernet headers, prepare template */
49710diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
49711index 777757a..395a767 100644
49712--- a/drivers/net/usb/usbnet.c
49713+++ b/drivers/net/usb/usbnet.c
49714@@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
49715 struct net_device *net)
49716 {
49717 struct usbnet *dev = netdev_priv(net);
49718- int length;
49719+ unsigned int length;
49720 struct urb *urb = NULL;
49721 struct skb_data *entry;
49722 struct driver_info *info = dev->driver_info;
49723@@ -1413,7 +1413,7 @@ not_drop:
49724 }
49725 } else
49726 netif_dbg(dev, tx_queued, dev->net,
49727- "> tx, len %d, type 0x%x\n", length, skb->protocol);
49728+ "> tx, len %u, type 0x%x\n", length, skb->protocol);
49729 #ifdef CONFIG_PM
49730 deferred:
49731 #endif
49732diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
49733index 59b0e97..a6ed579 100644
49734--- a/drivers/net/virtio_net.c
49735+++ b/drivers/net/virtio_net.c
49736@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
49737 #define RECEIVE_AVG_WEIGHT 64
49738
49739 /* Minimum alignment for mergeable packet buffers. */
49740-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
49741+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
49742
49743 #define VIRTNET_DRIVER_VERSION "1.0.0"
49744
49745diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
49746index fceb637..37c70fd 100644
49747--- a/drivers/net/vxlan.c
49748+++ b/drivers/net/vxlan.c
49749@@ -2935,7 +2935,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
49750 return vxlan->net;
49751 }
49752
49753-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
49754+static struct rtnl_link_ops vxlan_link_ops = {
49755 .kind = "vxlan",
49756 .maxtype = IFLA_VXLAN_MAX,
49757 .policy = vxlan_policy,
49758@@ -2983,7 +2983,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49759 return NOTIFY_DONE;
49760 }
49761
49762-static struct notifier_block vxlan_notifier_block __read_mostly = {
49763+static struct notifier_block vxlan_notifier_block = {
49764 .notifier_call = vxlan_lowerdev_event,
49765 };
49766
49767diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49768index 5920c99..ff2e4a5 100644
49769--- a/drivers/net/wan/lmc/lmc_media.c
49770+++ b/drivers/net/wan/lmc/lmc_media.c
49771@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49772 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49773
49774 lmc_media_t lmc_ds3_media = {
49775- lmc_ds3_init, /* special media init stuff */
49776- lmc_ds3_default, /* reset to default state */
49777- lmc_ds3_set_status, /* reset status to state provided */
49778- lmc_dummy_set_1, /* set clock source */
49779- lmc_dummy_set2_1, /* set line speed */
49780- lmc_ds3_set_100ft, /* set cable length */
49781- lmc_ds3_set_scram, /* set scrambler */
49782- lmc_ds3_get_link_status, /* get link status */
49783- lmc_dummy_set_1, /* set link status */
49784- lmc_ds3_set_crc_length, /* set CRC length */
49785- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49786- lmc_ds3_watchdog
49787+ .init = lmc_ds3_init, /* special media init stuff */
49788+ .defaults = lmc_ds3_default, /* reset to default state */
49789+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49790+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49791+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49792+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49793+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49794+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49795+ .set_link_status = lmc_dummy_set_1, /* set link status */
49796+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49797+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49798+ .watchdog = lmc_ds3_watchdog
49799 };
49800
49801 lmc_media_t lmc_hssi_media = {
49802- lmc_hssi_init, /* special media init stuff */
49803- lmc_hssi_default, /* reset to default state */
49804- lmc_hssi_set_status, /* reset status to state provided */
49805- lmc_hssi_set_clock, /* set clock source */
49806- lmc_dummy_set2_1, /* set line speed */
49807- lmc_dummy_set_1, /* set cable length */
49808- lmc_dummy_set_1, /* set scrambler */
49809- lmc_hssi_get_link_status, /* get link status */
49810- lmc_hssi_set_link_status, /* set link status */
49811- lmc_hssi_set_crc_length, /* set CRC length */
49812- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49813- lmc_hssi_watchdog
49814+ .init = lmc_hssi_init, /* special media init stuff */
49815+ .defaults = lmc_hssi_default, /* reset to default state */
49816+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49817+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49818+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49819+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49820+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49821+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49822+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49823+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49824+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49825+ .watchdog = lmc_hssi_watchdog
49826 };
49827
49828-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49829- lmc_ssi_default, /* reset to default state */
49830- lmc_ssi_set_status, /* reset status to state provided */
49831- lmc_ssi_set_clock, /* set clock source */
49832- lmc_ssi_set_speed, /* set line speed */
49833- lmc_dummy_set_1, /* set cable length */
49834- lmc_dummy_set_1, /* set scrambler */
49835- lmc_ssi_get_link_status, /* get link status */
49836- lmc_ssi_set_link_status, /* set link status */
49837- lmc_ssi_set_crc_length, /* set CRC length */
49838- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49839- lmc_ssi_watchdog
49840+lmc_media_t lmc_ssi_media = {
49841+ .init = lmc_ssi_init, /* special media init stuff */
49842+ .defaults = lmc_ssi_default, /* reset to default state */
49843+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49844+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49845+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49846+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49847+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49848+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49849+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49850+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49851+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49852+ .watchdog = lmc_ssi_watchdog
49853 };
49854
49855 lmc_media_t lmc_t1_media = {
49856- lmc_t1_init, /* special media init stuff */
49857- lmc_t1_default, /* reset to default state */
49858- lmc_t1_set_status, /* reset status to state provided */
49859- lmc_t1_set_clock, /* set clock source */
49860- lmc_dummy_set2_1, /* set line speed */
49861- lmc_dummy_set_1, /* set cable length */
49862- lmc_dummy_set_1, /* set scrambler */
49863- lmc_t1_get_link_status, /* get link status */
49864- lmc_dummy_set_1, /* set link status */
49865- lmc_t1_set_crc_length, /* set CRC length */
49866- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49867- lmc_t1_watchdog
49868+ .init = lmc_t1_init, /* special media init stuff */
49869+ .defaults = lmc_t1_default, /* reset to default state */
49870+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49871+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49872+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49873+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49874+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49875+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49876+ .set_link_status = lmc_dummy_set_1, /* set link status */
49877+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49878+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49879+ .watchdog = lmc_t1_watchdog
49880 };
49881
49882 static void
49883diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49884index feacc3b..5bac0de 100644
49885--- a/drivers/net/wan/z85230.c
49886+++ b/drivers/net/wan/z85230.c
49887@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49888
49889 struct z8530_irqhandler z8530_sync =
49890 {
49891- z8530_rx,
49892- z8530_tx,
49893- z8530_status
49894+ .rx = z8530_rx,
49895+ .tx = z8530_tx,
49896+ .status = z8530_status
49897 };
49898
49899 EXPORT_SYMBOL(z8530_sync);
49900@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49901 }
49902
49903 static struct z8530_irqhandler z8530_dma_sync = {
49904- z8530_dma_rx,
49905- z8530_dma_tx,
49906- z8530_dma_status
49907+ .rx = z8530_dma_rx,
49908+ .tx = z8530_dma_tx,
49909+ .status = z8530_dma_status
49910 };
49911
49912 static struct z8530_irqhandler z8530_txdma_sync = {
49913- z8530_rx,
49914- z8530_dma_tx,
49915- z8530_dma_status
49916+ .rx = z8530_rx,
49917+ .tx = z8530_dma_tx,
49918+ .status = z8530_dma_status
49919 };
49920
49921 /**
49922@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49923
49924 struct z8530_irqhandler z8530_nop=
49925 {
49926- z8530_rx_clear,
49927- z8530_tx_clear,
49928- z8530_status_clear
49929+ .rx = z8530_rx_clear,
49930+ .tx = z8530_tx_clear,
49931+ .status = z8530_status_clear
49932 };
49933
49934
49935diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49936index 0b60295..b8bfa5b 100644
49937--- a/drivers/net/wimax/i2400m/rx.c
49938+++ b/drivers/net/wimax/i2400m/rx.c
49939@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49940 if (i2400m->rx_roq == NULL)
49941 goto error_roq_alloc;
49942
49943- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49944+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49945 GFP_KERNEL);
49946 if (rd == NULL) {
49947 result = -ENOMEM;
49948diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49949index e71a2ce..2268d61 100644
49950--- a/drivers/net/wireless/airo.c
49951+++ b/drivers/net/wireless/airo.c
49952@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49953 struct airo_info *ai = dev->ml_priv;
49954 int ridcode;
49955 int enabled;
49956- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49957+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49958 unsigned char *iobuf;
49959
49960 /* Only super-user can write RIDs */
49961diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49962index da92bfa..5a9001a 100644
49963--- a/drivers/net/wireless/at76c50x-usb.c
49964+++ b/drivers/net/wireless/at76c50x-usb.c
49965@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49966 }
49967
49968 /* Convert timeout from the DFU status to jiffies */
49969-static inline unsigned long at76_get_timeout(struct dfu_status *s)
49970+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
49971 {
49972 return msecs_to_jiffies((s->poll_timeout[2] << 16)
49973 | (s->poll_timeout[1] << 8)
49974diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
49975index 2fd9e18..3f55bdd 100644
49976--- a/drivers/net/wireless/ath/ath10k/htc.c
49977+++ b/drivers/net/wireless/ath/ath10k/htc.c
49978@@ -849,7 +849,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
49979 /* registered target arrival callback from the HIF layer */
49980 int ath10k_htc_init(struct ath10k *ar)
49981 {
49982- struct ath10k_hif_cb htc_callbacks;
49983+ static struct ath10k_hif_cb htc_callbacks = {
49984+ .rx_completion = ath10k_htc_rx_completion_handler,
49985+ .tx_completion = ath10k_htc_tx_completion_handler,
49986+ };
49987 struct ath10k_htc_ep *ep = NULL;
49988 struct ath10k_htc *htc = &ar->htc;
49989
49990@@ -858,8 +861,6 @@ int ath10k_htc_init(struct ath10k *ar)
49991 ath10k_htc_reset_endpoint_states(htc);
49992
49993 /* setup HIF layer callbacks */
49994- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
49995- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
49996 htc->ar = ar;
49997
49998 /* Get HIF default pipe for HTC message exchange */
49999diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
50000index 527179c..a890150 100644
50001--- a/drivers/net/wireless/ath/ath10k/htc.h
50002+++ b/drivers/net/wireless/ath/ath10k/htc.h
50003@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
50004
50005 struct ath10k_htc_ops {
50006 void (*target_send_suspend_complete)(struct ath10k *ar);
50007-};
50008+} __no_const;
50009
50010 struct ath10k_htc_ep_ops {
50011 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
50012 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
50013 void (*ep_tx_credits)(struct ath10k *);
50014-};
50015+} __no_const;
50016
50017 /* service connection information */
50018 struct ath10k_htc_svc_conn_req {
50019diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50020index f816909..e56cd8b 100644
50021--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50022+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50023@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50024 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
50025 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
50026
50027- ACCESS_ONCE(ads->ds_link) = i->link;
50028- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
50029+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
50030+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
50031
50032 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
50033 ctl6 = SM(i->keytype, AR_EncrType);
50034@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50035
50036 if ((i->is_first || i->is_last) &&
50037 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
50038- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
50039+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
50040 | set11nTries(i->rates, 1)
50041 | set11nTries(i->rates, 2)
50042 | set11nTries(i->rates, 3)
50043 | (i->dur_update ? AR_DurUpdateEna : 0)
50044 | SM(0, AR_BurstDur);
50045
50046- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
50047+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
50048 | set11nRate(i->rates, 1)
50049 | set11nRate(i->rates, 2)
50050 | set11nRate(i->rates, 3);
50051 } else {
50052- ACCESS_ONCE(ads->ds_ctl2) = 0;
50053- ACCESS_ONCE(ads->ds_ctl3) = 0;
50054+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
50055+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
50056 }
50057
50058 if (!i->is_first) {
50059- ACCESS_ONCE(ads->ds_ctl0) = 0;
50060- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50061- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50062+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
50063+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50064+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50065 return;
50066 }
50067
50068@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50069 break;
50070 }
50071
50072- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50073+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50074 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50075 | SM(i->txpower[0], AR_XmitPower0)
50076 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50077@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50078 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
50079 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
50080
50081- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50082- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50083+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50084+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50085
50086 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
50087 return;
50088
50089- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50090+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50091 | set11nPktDurRTSCTS(i->rates, 1);
50092
50093- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50094+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50095 | set11nPktDurRTSCTS(i->rates, 3);
50096
50097- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50098+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50099 | set11nRateFlags(i->rates, 1)
50100 | set11nRateFlags(i->rates, 2)
50101 | set11nRateFlags(i->rates, 3)
50102 | SM(i->rtscts_rate, AR_RTSCTSRate);
50103
50104- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
50105- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
50106- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
50107+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
50108+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
50109+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
50110 }
50111
50112 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
50113diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50114index da84b70..83e4978 100644
50115--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50116+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50117@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50118 (i->qcu << AR_TxQcuNum_S) | desc_len;
50119
50120 checksum += val;
50121- ACCESS_ONCE(ads->info) = val;
50122+ ACCESS_ONCE_RW(ads->info) = val;
50123
50124 checksum += i->link;
50125- ACCESS_ONCE(ads->link) = i->link;
50126+ ACCESS_ONCE_RW(ads->link) = i->link;
50127
50128 checksum += i->buf_addr[0];
50129- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
50130+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
50131 checksum += i->buf_addr[1];
50132- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
50133+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
50134 checksum += i->buf_addr[2];
50135- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
50136+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
50137 checksum += i->buf_addr[3];
50138- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
50139+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
50140
50141 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
50142- ACCESS_ONCE(ads->ctl3) = val;
50143+ ACCESS_ONCE_RW(ads->ctl3) = val;
50144 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
50145- ACCESS_ONCE(ads->ctl5) = val;
50146+ ACCESS_ONCE_RW(ads->ctl5) = val;
50147 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
50148- ACCESS_ONCE(ads->ctl7) = val;
50149+ ACCESS_ONCE_RW(ads->ctl7) = val;
50150 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
50151- ACCESS_ONCE(ads->ctl9) = val;
50152+ ACCESS_ONCE_RW(ads->ctl9) = val;
50153
50154 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
50155- ACCESS_ONCE(ads->ctl10) = checksum;
50156+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
50157
50158 if (i->is_first || i->is_last) {
50159- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
50160+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
50161 | set11nTries(i->rates, 1)
50162 | set11nTries(i->rates, 2)
50163 | set11nTries(i->rates, 3)
50164 | (i->dur_update ? AR_DurUpdateEna : 0)
50165 | SM(0, AR_BurstDur);
50166
50167- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
50168+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
50169 | set11nRate(i->rates, 1)
50170 | set11nRate(i->rates, 2)
50171 | set11nRate(i->rates, 3);
50172 } else {
50173- ACCESS_ONCE(ads->ctl13) = 0;
50174- ACCESS_ONCE(ads->ctl14) = 0;
50175+ ACCESS_ONCE_RW(ads->ctl13) = 0;
50176+ ACCESS_ONCE_RW(ads->ctl14) = 0;
50177 }
50178
50179 ads->ctl20 = 0;
50180@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50181
50182 ctl17 = SM(i->keytype, AR_EncrType);
50183 if (!i->is_first) {
50184- ACCESS_ONCE(ads->ctl11) = 0;
50185- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50186- ACCESS_ONCE(ads->ctl15) = 0;
50187- ACCESS_ONCE(ads->ctl16) = 0;
50188- ACCESS_ONCE(ads->ctl17) = ctl17;
50189- ACCESS_ONCE(ads->ctl18) = 0;
50190- ACCESS_ONCE(ads->ctl19) = 0;
50191+ ACCESS_ONCE_RW(ads->ctl11) = 0;
50192+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50193+ ACCESS_ONCE_RW(ads->ctl15) = 0;
50194+ ACCESS_ONCE_RW(ads->ctl16) = 0;
50195+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50196+ ACCESS_ONCE_RW(ads->ctl18) = 0;
50197+ ACCESS_ONCE_RW(ads->ctl19) = 0;
50198 return;
50199 }
50200
50201- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50202+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50203 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50204 | SM(i->txpower[0], AR_XmitPower0)
50205 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50206@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50207 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
50208 ctl12 |= SM(val, AR_PAPRDChainMask);
50209
50210- ACCESS_ONCE(ads->ctl12) = ctl12;
50211- ACCESS_ONCE(ads->ctl17) = ctl17;
50212+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
50213+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50214
50215- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50216+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50217 | set11nPktDurRTSCTS(i->rates, 1);
50218
50219- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50220+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50221 | set11nPktDurRTSCTS(i->rates, 3);
50222
50223- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
50224+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
50225 | set11nRateFlags(i->rates, 1)
50226 | set11nRateFlags(i->rates, 2)
50227 | set11nRateFlags(i->rates, 3)
50228 | SM(i->rtscts_rate, AR_RTSCTSRate);
50229
50230- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
50231+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
50232
50233- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
50234- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
50235- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
50236+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
50237+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
50238+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
50239 }
50240
50241 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
50242diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50243index e82e570..8c3cf90 100644
50244--- a/drivers/net/wireless/ath/ath9k/hw.h
50245+++ b/drivers/net/wireless/ath/ath9k/hw.h
50246@@ -646,7 +646,7 @@ struct ath_hw_private_ops {
50247
50248 /* ANI */
50249 void (*ani_cache_ini_regs)(struct ath_hw *ah);
50250-};
50251+} __no_const;
50252
50253 /**
50254 * struct ath_spec_scan - parameters for Atheros spectral scan
50255@@ -722,7 +722,7 @@ struct ath_hw_ops {
50256 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50257 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50258 #endif
50259-};
50260+} __no_const;
50261
50262 struct ath_nf_limits {
50263 s16 max;
50264diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
50265index 9ede991..a8f08fb 100644
50266--- a/drivers/net/wireless/ath/ath9k/main.c
50267+++ b/drivers/net/wireless/ath/ath9k/main.c
50268@@ -2537,16 +2537,18 @@ void ath9k_fill_chanctx_ops(void)
50269 if (!ath9k_is_chanctx_enabled())
50270 return;
50271
50272- ath9k_ops.hw_scan = ath9k_hw_scan;
50273- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50274- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50275- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50276- ath9k_ops.add_chanctx = ath9k_add_chanctx;
50277- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50278- ath9k_ops.change_chanctx = ath9k_change_chanctx;
50279- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50280- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50281- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50282+ pax_open_kernel();
50283+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
50284+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50285+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50286+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50287+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
50288+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50289+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
50290+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50291+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50292+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50293+ pax_close_kernel();
50294 }
50295
50296 #endif
50297diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50298index 058a9f2..d5cb1ba 100644
50299--- a/drivers/net/wireless/b43/phy_lp.c
50300+++ b/drivers/net/wireless/b43/phy_lp.c
50301@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50302 {
50303 struct ssb_bus *bus = dev->dev->sdev->bus;
50304
50305- static const struct b206x_channel *chandata = NULL;
50306+ const struct b206x_channel *chandata = NULL;
50307 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50308 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50309 u16 old_comm15, scale;
50310diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50311index e566580..2c218ca 100644
50312--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50313+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50314@@ -3631,7 +3631,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50315 */
50316 if (il3945_mod_params.disable_hw_scan) {
50317 D_INFO("Disabling hw_scan\n");
50318- il3945_mac_ops.hw_scan = NULL;
50319+ pax_open_kernel();
50320+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50321+ pax_close_kernel();
50322 }
50323
50324 D_INFO("*** LOAD DRIVER ***\n");
50325diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50326index 0ffb6ff..c0b7f0e 100644
50327--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50328+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50329@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50330 {
50331 struct iwl_priv *priv = file->private_data;
50332 char buf[64];
50333- int buf_size;
50334+ size_t buf_size;
50335 u32 offset, len;
50336
50337 memset(buf, 0, sizeof(buf));
50338@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50339 struct iwl_priv *priv = file->private_data;
50340
50341 char buf[8];
50342- int buf_size;
50343+ size_t buf_size;
50344 u32 reset_flag;
50345
50346 memset(buf, 0, sizeof(buf));
50347@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50348 {
50349 struct iwl_priv *priv = file->private_data;
50350 char buf[8];
50351- int buf_size;
50352+ size_t buf_size;
50353 int ht40;
50354
50355 memset(buf, 0, sizeof(buf));
50356@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50357 {
50358 struct iwl_priv *priv = file->private_data;
50359 char buf[8];
50360- int buf_size;
50361+ size_t buf_size;
50362 int value;
50363
50364 memset(buf, 0, sizeof(buf));
50365@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50366 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50367 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50368
50369-static const char *fmt_value = " %-30s %10u\n";
50370-static const char *fmt_hex = " %-30s 0x%02X\n";
50371-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50372-static const char *fmt_header =
50373+static const char fmt_value[] = " %-30s %10u\n";
50374+static const char fmt_hex[] = " %-30s 0x%02X\n";
50375+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50376+static const char fmt_header[] =
50377 "%-32s current cumulative delta max\n";
50378
50379 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50380@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50381 {
50382 struct iwl_priv *priv = file->private_data;
50383 char buf[8];
50384- int buf_size;
50385+ size_t buf_size;
50386 int clear;
50387
50388 memset(buf, 0, sizeof(buf));
50389@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50390 {
50391 struct iwl_priv *priv = file->private_data;
50392 char buf[8];
50393- int buf_size;
50394+ size_t buf_size;
50395 int trace;
50396
50397 memset(buf, 0, sizeof(buf));
50398@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50399 {
50400 struct iwl_priv *priv = file->private_data;
50401 char buf[8];
50402- int buf_size;
50403+ size_t buf_size;
50404 int missed;
50405
50406 memset(buf, 0, sizeof(buf));
50407@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50408
50409 struct iwl_priv *priv = file->private_data;
50410 char buf[8];
50411- int buf_size;
50412+ size_t buf_size;
50413 int plcp;
50414
50415 memset(buf, 0, sizeof(buf));
50416@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50417
50418 struct iwl_priv *priv = file->private_data;
50419 char buf[8];
50420- int buf_size;
50421+ size_t buf_size;
50422 int flush;
50423
50424 memset(buf, 0, sizeof(buf));
50425@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50426
50427 struct iwl_priv *priv = file->private_data;
50428 char buf[8];
50429- int buf_size;
50430+ size_t buf_size;
50431 int rts;
50432
50433 if (!priv->cfg->ht_params)
50434@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50435 {
50436 struct iwl_priv *priv = file->private_data;
50437 char buf[8];
50438- int buf_size;
50439+ size_t buf_size;
50440
50441 memset(buf, 0, sizeof(buf));
50442 buf_size = min(count, sizeof(buf) - 1);
50443@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50444 struct iwl_priv *priv = file->private_data;
50445 u32 event_log_flag;
50446 char buf[8];
50447- int buf_size;
50448+ size_t buf_size;
50449
50450 /* check that the interface is up */
50451 if (!iwl_is_ready(priv))
50452@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50453 struct iwl_priv *priv = file->private_data;
50454 char buf[8];
50455 u32 calib_disabled;
50456- int buf_size;
50457+ size_t buf_size;
50458
50459 memset(buf, 0, sizeof(buf));
50460 buf_size = min(count, sizeof(buf) - 1);
50461diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50462index 69935aa..c1ca128 100644
50463--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50464+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50465@@ -1836,7 +1836,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50466 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50467
50468 char buf[8];
50469- int buf_size;
50470+ size_t buf_size;
50471 u32 reset_flag;
50472
50473 memset(buf, 0, sizeof(buf));
50474@@ -1857,7 +1857,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50475 {
50476 struct iwl_trans *trans = file->private_data;
50477 char buf[8];
50478- int buf_size;
50479+ size_t buf_size;
50480 int csr;
50481
50482 memset(buf, 0, sizeof(buf));
50483diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50484index 8908be6..fe97ddd 100644
50485--- a/drivers/net/wireless/mac80211_hwsim.c
50486+++ b/drivers/net/wireless/mac80211_hwsim.c
50487@@ -3070,20 +3070,20 @@ static int __init init_mac80211_hwsim(void)
50488 if (channels < 1)
50489 return -EINVAL;
50490
50491- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50492- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50493- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50494- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50495- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50496- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50497- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50498- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50499- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50500- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50501- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50502- mac80211_hwsim_assign_vif_chanctx;
50503- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50504- mac80211_hwsim_unassign_vif_chanctx;
50505+ pax_open_kernel();
50506+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50507+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50508+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50509+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50510+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50511+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50512+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50513+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50514+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50515+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50516+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50517+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50518+ pax_close_kernel();
50519
50520 spin_lock_init(&hwsim_radio_lock);
50521 INIT_LIST_HEAD(&hwsim_radios);
50522diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50523index 60d44ce..884dd1c 100644
50524--- a/drivers/net/wireless/rndis_wlan.c
50525+++ b/drivers/net/wireless/rndis_wlan.c
50526@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50527
50528 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50529
50530- if (rts_threshold < 0 || rts_threshold > 2347)
50531+ if (rts_threshold > 2347)
50532 rts_threshold = 2347;
50533
50534 tmp = cpu_to_le32(rts_threshold);
50535diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50536index 9bb398b..b0cc047 100644
50537--- a/drivers/net/wireless/rt2x00/rt2x00.h
50538+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50539@@ -375,7 +375,7 @@ struct rt2x00_intf {
50540 * for hardware which doesn't support hardware
50541 * sequence counting.
50542 */
50543- atomic_t seqno;
50544+ atomic_unchecked_t seqno;
50545 };
50546
50547 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50548diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50549index 68b620b..92ecd9e 100644
50550--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50551+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50552@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50553 * sequence counter given by mac80211.
50554 */
50555 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50556- seqno = atomic_add_return(0x10, &intf->seqno);
50557+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50558 else
50559- seqno = atomic_read(&intf->seqno);
50560+ seqno = atomic_read_unchecked(&intf->seqno);
50561
50562 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50563 hdr->seq_ctrl |= cpu_to_le16(seqno);
50564diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50565index b661f896..ddf7d2b 100644
50566--- a/drivers/net/wireless/ti/wl1251/sdio.c
50567+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50568@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50569
50570 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50571
50572- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50573- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50574+ pax_open_kernel();
50575+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50576+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50577+ pax_close_kernel();
50578
50579 wl1251_info("using dedicated interrupt line");
50580 } else {
50581- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50582- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50583+ pax_open_kernel();
50584+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50585+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50586+ pax_close_kernel();
50587
50588 wl1251_info("using SDIO interrupt");
50589 }
50590diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50591index 144d1f8..7030936 100644
50592--- a/drivers/net/wireless/ti/wl12xx/main.c
50593+++ b/drivers/net/wireless/ti/wl12xx/main.c
50594@@ -657,7 +657,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50595 sizeof(wl->conf.mem));
50596
50597 /* read data preparation is only needed by wl127x */
50598- wl->ops->prepare_read = wl127x_prepare_read;
50599+ pax_open_kernel();
50600+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50601+ pax_close_kernel();
50602
50603 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50604 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50605@@ -682,7 +684,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50606 sizeof(wl->conf.mem));
50607
50608 /* read data preparation is only needed by wl127x */
50609- wl->ops->prepare_read = wl127x_prepare_read;
50610+ pax_open_kernel();
50611+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50612+ pax_close_kernel();
50613
50614 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50615 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50616diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
50617index 717c4f5..a813aeb 100644
50618--- a/drivers/net/wireless/ti/wl18xx/main.c
50619+++ b/drivers/net/wireless/ti/wl18xx/main.c
50620@@ -1923,8 +1923,10 @@ static int wl18xx_setup(struct wl1271 *wl)
50621 }
50622
50623 if (!checksum_param) {
50624- wl18xx_ops.set_rx_csum = NULL;
50625- wl18xx_ops.init_vif = NULL;
50626+ pax_open_kernel();
50627+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
50628+ *(void **)&wl18xx_ops.init_vif = NULL;
50629+ pax_close_kernel();
50630 }
50631
50632 /* Enable 11a Band only if we have 5G antennas */
50633diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
50634index a912dc0..a8225ba 100644
50635--- a/drivers/net/wireless/zd1211rw/zd_usb.c
50636+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
50637@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
50638 {
50639 struct zd_usb *usb = urb->context;
50640 struct zd_usb_interrupt *intr = &usb->intr;
50641- int len;
50642+ unsigned int len;
50643 u16 int_num;
50644
50645 ZD_ASSERT(in_interrupt());
50646diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
50647index ce2e2cf..f81e500 100644
50648--- a/drivers/nfc/nfcwilink.c
50649+++ b/drivers/nfc/nfcwilink.c
50650@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
50651
50652 static int nfcwilink_probe(struct platform_device *pdev)
50653 {
50654- static struct nfcwilink *drv;
50655+ struct nfcwilink *drv;
50656 int rc;
50657 __u32 protocols;
50658
50659diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
50660index 24d3d24..ff70d28 100644
50661--- a/drivers/nfc/st21nfca/st21nfca.c
50662+++ b/drivers/nfc/st21nfca/st21nfca.c
50663@@ -588,7 +588,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
50664 goto exit;
50665 }
50666
50667- gate = uid_skb->data;
50668+ memcpy(gate, uid_skb->data, uid_skb->len);
50669 *len = uid_skb->len;
50670 exit:
50671 kfree_skb(uid_skb);
50672diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
50673index 3a896c9..ac7b1c8 100644
50674--- a/drivers/of/fdt.c
50675+++ b/drivers/of/fdt.c
50676@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
50677 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
50678 return 0;
50679 }
50680- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50681+ pax_open_kernel();
50682+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50683+ pax_close_kernel();
50684 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
50685 }
50686 late_initcall(of_fdt_raw_init);
50687diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
50688index d93b2b6..ae50401 100644
50689--- a/drivers/oprofile/buffer_sync.c
50690+++ b/drivers/oprofile/buffer_sync.c
50691@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
50692 if (cookie == NO_COOKIE)
50693 offset = pc;
50694 if (cookie == INVALID_COOKIE) {
50695- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50696+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50697 offset = pc;
50698 }
50699 if (cookie != last_cookie) {
50700@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
50701 /* add userspace sample */
50702
50703 if (!mm) {
50704- atomic_inc(&oprofile_stats.sample_lost_no_mm);
50705+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
50706 return 0;
50707 }
50708
50709 cookie = lookup_dcookie(mm, s->eip, &offset);
50710
50711 if (cookie == INVALID_COOKIE) {
50712- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50713+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50714 return 0;
50715 }
50716
50717@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
50718 /* ignore backtraces if failed to add a sample */
50719 if (state == sb_bt_start) {
50720 state = sb_bt_ignore;
50721- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
50722+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
50723 }
50724 }
50725 release_mm(mm);
50726diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
50727index c0cc4e7..44d4e54 100644
50728--- a/drivers/oprofile/event_buffer.c
50729+++ b/drivers/oprofile/event_buffer.c
50730@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
50731 }
50732
50733 if (buffer_pos == buffer_size) {
50734- atomic_inc(&oprofile_stats.event_lost_overflow);
50735+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
50736 return;
50737 }
50738
50739diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
50740index ed2c3ec..deda85a 100644
50741--- a/drivers/oprofile/oprof.c
50742+++ b/drivers/oprofile/oprof.c
50743@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
50744 if (oprofile_ops.switch_events())
50745 return;
50746
50747- atomic_inc(&oprofile_stats.multiplex_counter);
50748+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
50749 start_switch_worker();
50750 }
50751
50752diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
50753index ee2cfce..7f8f699 100644
50754--- a/drivers/oprofile/oprofile_files.c
50755+++ b/drivers/oprofile/oprofile_files.c
50756@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
50757
50758 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
50759
50760-static ssize_t timeout_read(struct file *file, char __user *buf,
50761+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
50762 size_t count, loff_t *offset)
50763 {
50764 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50765diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50766index 59659ce..6c860a0 100644
50767--- a/drivers/oprofile/oprofile_stats.c
50768+++ b/drivers/oprofile/oprofile_stats.c
50769@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50770 cpu_buf->sample_invalid_eip = 0;
50771 }
50772
50773- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50774- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50775- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50776- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50777- atomic_set(&oprofile_stats.multiplex_counter, 0);
50778+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50779+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50780+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50781+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50782+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50783 }
50784
50785
50786diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50787index 1fc622b..8c48fc3 100644
50788--- a/drivers/oprofile/oprofile_stats.h
50789+++ b/drivers/oprofile/oprofile_stats.h
50790@@ -13,11 +13,11 @@
50791 #include <linux/atomic.h>
50792
50793 struct oprofile_stat_struct {
50794- atomic_t sample_lost_no_mm;
50795- atomic_t sample_lost_no_mapping;
50796- atomic_t bt_lost_no_mapping;
50797- atomic_t event_lost_overflow;
50798- atomic_t multiplex_counter;
50799+ atomic_unchecked_t sample_lost_no_mm;
50800+ atomic_unchecked_t sample_lost_no_mapping;
50801+ atomic_unchecked_t bt_lost_no_mapping;
50802+ atomic_unchecked_t event_lost_overflow;
50803+ atomic_unchecked_t multiplex_counter;
50804 };
50805
50806 extern struct oprofile_stat_struct oprofile_stats;
50807diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50808index 3f49345..c750d0b 100644
50809--- a/drivers/oprofile/oprofilefs.c
50810+++ b/drivers/oprofile/oprofilefs.c
50811@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50812
50813 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50814 {
50815- atomic_t *val = file->private_data;
50816- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50817+ atomic_unchecked_t *val = file->private_data;
50818+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50819 }
50820
50821
50822@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50823
50824
50825 int oprofilefs_create_ro_atomic(struct dentry *root,
50826- char const *name, atomic_t *val)
50827+ char const *name, atomic_unchecked_t *val)
50828 {
50829 return __oprofilefs_create_file(root, name,
50830 &atomic_ro_fops, 0444, val);
50831diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50832index bdef916..88c7dee 100644
50833--- a/drivers/oprofile/timer_int.c
50834+++ b/drivers/oprofile/timer_int.c
50835@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50836 return NOTIFY_OK;
50837 }
50838
50839-static struct notifier_block __refdata oprofile_cpu_notifier = {
50840+static struct notifier_block oprofile_cpu_notifier = {
50841 .notifier_call = oprofile_cpu_notify,
50842 };
50843
50844diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50845index 3b47080..6cd05dd 100644
50846--- a/drivers/parport/procfs.c
50847+++ b/drivers/parport/procfs.c
50848@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50849
50850 *ppos += len;
50851
50852- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50853+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50854 }
50855
50856 #ifdef CONFIG_PARPORT_1284
50857@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50858
50859 *ppos += len;
50860
50861- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50862+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50863 }
50864 #endif /* IEEE1284.3 support. */
50865
50866diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
50867index ba46e58..90cfc24 100644
50868--- a/drivers/pci/host/pci-host-generic.c
50869+++ b/drivers/pci/host/pci-host-generic.c
50870@@ -26,9 +26,9 @@
50871 #include <linux/platform_device.h>
50872
50873 struct gen_pci_cfg_bus_ops {
50874+ struct pci_ops ops;
50875 u32 bus_shift;
50876- void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
50877-};
50878+} __do_const;
50879
50880 struct gen_pci_cfg_windows {
50881 struct resource res;
50882@@ -56,8 +56,12 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
50883 }
50884
50885 static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
50886+ .ops = {
50887+ .map_bus = gen_pci_map_cfg_bus_cam,
50888+ .read = pci_generic_config_read,
50889+ .write = pci_generic_config_write,
50890+ },
50891 .bus_shift = 16,
50892- .map_bus = gen_pci_map_cfg_bus_cam,
50893 };
50894
50895 static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50896@@ -72,13 +76,12 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50897 }
50898
50899 static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
50900+ .ops = {
50901+ .map_bus = gen_pci_map_cfg_bus_ecam,
50902+ .read = pci_generic_config_read,
50903+ .write = pci_generic_config_write,
50904+ },
50905 .bus_shift = 20,
50906- .map_bus = gen_pci_map_cfg_bus_ecam,
50907-};
50908-
50909-static struct pci_ops gen_pci_ops = {
50910- .read = pci_generic_config_read,
50911- .write = pci_generic_config_write,
50912 };
50913
50914 static const struct of_device_id gen_pci_of_match[] = {
50915@@ -219,7 +222,6 @@ static int gen_pci_probe(struct platform_device *pdev)
50916 .private_data = (void **)&pci,
50917 .setup = gen_pci_setup,
50918 .map_irq = of_irq_parse_and_map_pci,
50919- .ops = &gen_pci_ops,
50920 };
50921
50922 if (!pci)
50923@@ -241,7 +243,7 @@ static int gen_pci_probe(struct platform_device *pdev)
50924
50925 of_id = of_match_node(gen_pci_of_match, np);
50926 pci->cfg.ops = of_id->data;
50927- gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
50928+ hw.ops = &pci->cfg.ops->ops;
50929 pci->host.dev.parent = dev;
50930 INIT_LIST_HEAD(&pci->host.windows);
50931 INIT_LIST_HEAD(&pci->resources);
50932diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50933index 6ca2399..68d866b 100644
50934--- a/drivers/pci/hotplug/acpiphp_ibm.c
50935+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50936@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50937 goto init_cleanup;
50938 }
50939
50940- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50941+ pax_open_kernel();
50942+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50943+ pax_close_kernel();
50944 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50945
50946 return retval;
50947diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50948index 66b7bbe..26bee78 100644
50949--- a/drivers/pci/hotplug/cpcihp_generic.c
50950+++ b/drivers/pci/hotplug/cpcihp_generic.c
50951@@ -73,7 +73,6 @@ static u16 port;
50952 static unsigned int enum_bit;
50953 static u8 enum_mask;
50954
50955-static struct cpci_hp_controller_ops generic_hpc_ops;
50956 static struct cpci_hp_controller generic_hpc;
50957
50958 static int __init validate_parameters(void)
50959@@ -139,6 +138,10 @@ static int query_enum(void)
50960 return ((value & enum_mask) == enum_mask);
50961 }
50962
50963+static struct cpci_hp_controller_ops generic_hpc_ops = {
50964+ .query_enum = query_enum,
50965+};
50966+
50967 static int __init cpcihp_generic_init(void)
50968 {
50969 int status;
50970@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50971 pci_dev_put(dev);
50972
50973 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50974- generic_hpc_ops.query_enum = query_enum;
50975 generic_hpc.ops = &generic_hpc_ops;
50976
50977 status = cpci_hp_register_controller(&generic_hpc);
50978diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50979index 7ecf34e..effed62 100644
50980--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50981+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50982@@ -59,7 +59,6 @@
50983 /* local variables */
50984 static bool debug;
50985 static bool poll;
50986-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50987 static struct cpci_hp_controller zt5550_hpc;
50988
50989 /* Primary cPCI bus bridge device */
50990@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
50991 return 0;
50992 }
50993
50994+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
50995+ .query_enum = zt5550_hc_query_enum,
50996+};
50997+
50998 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
50999 {
51000 int status;
51001@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
51002 dbg("returned from zt5550_hc_config");
51003
51004 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
51005- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
51006 zt5550_hpc.ops = &zt5550_hpc_ops;
51007 if (!poll) {
51008 zt5550_hpc.irq = hc_dev->irq;
51009 zt5550_hpc.irq_flags = IRQF_SHARED;
51010 zt5550_hpc.dev_id = hc_dev;
51011
51012- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51013- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51014- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51015+ pax_open_kernel();
51016+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51017+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51018+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51019+ pax_open_kernel();
51020 } else {
51021 info("using ENUM# polling mode");
51022 }
51023diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
51024index 1e08ff8c..3cd145f 100644
51025--- a/drivers/pci/hotplug/cpqphp_nvram.c
51026+++ b/drivers/pci/hotplug/cpqphp_nvram.c
51027@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
51028
51029 void compaq_nvram_init (void __iomem *rom_start)
51030 {
51031+#ifndef CONFIG_PAX_KERNEXEC
51032 if (rom_start)
51033 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
51034+#endif
51035
51036 dbg("int15 entry = %p\n", compaq_int15_entry_point);
51037
51038diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
51039index 56d8486..f26113f 100644
51040--- a/drivers/pci/hotplug/pci_hotplug_core.c
51041+++ b/drivers/pci/hotplug/pci_hotplug_core.c
51042@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
51043 return -EINVAL;
51044 }
51045
51046- slot->ops->owner = owner;
51047- slot->ops->mod_name = mod_name;
51048+ pax_open_kernel();
51049+ *(struct module **)&slot->ops->owner = owner;
51050+ *(const char **)&slot->ops->mod_name = mod_name;
51051+ pax_close_kernel();
51052
51053 mutex_lock(&pci_hp_mutex);
51054 /*
51055diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
51056index 07aa722..84514b4 100644
51057--- a/drivers/pci/hotplug/pciehp_core.c
51058+++ b/drivers/pci/hotplug/pciehp_core.c
51059@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
51060 struct slot *slot = ctrl->slot;
51061 struct hotplug_slot *hotplug = NULL;
51062 struct hotplug_slot_info *info = NULL;
51063- struct hotplug_slot_ops *ops = NULL;
51064+ hotplug_slot_ops_no_const *ops = NULL;
51065 char name[SLOT_NAME_SIZE];
51066 int retval = -ENOMEM;
51067
51068diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
51069index c3e7dfc..cbd9625 100644
51070--- a/drivers/pci/msi.c
51071+++ b/drivers/pci/msi.c
51072@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
51073 {
51074 struct attribute **msi_attrs;
51075 struct attribute *msi_attr;
51076- struct device_attribute *msi_dev_attr;
51077- struct attribute_group *msi_irq_group;
51078+ device_attribute_no_const *msi_dev_attr;
51079+ attribute_group_no_const *msi_irq_group;
51080 const struct attribute_group **msi_irq_groups;
51081 struct msi_desc *entry;
51082 int ret = -ENOMEM;
51083@@ -573,7 +573,7 @@ error_attrs:
51084 count = 0;
51085 msi_attr = msi_attrs[count];
51086 while (msi_attr) {
51087- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
51088+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
51089 kfree(msi_attr->name);
51090 kfree(msi_dev_attr);
51091 ++count;
51092diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
51093index 312f23a..d21181c 100644
51094--- a/drivers/pci/pci-sysfs.c
51095+++ b/drivers/pci/pci-sysfs.c
51096@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
51097 {
51098 /* allocate attribute structure, piggyback attribute name */
51099 int name_len = write_combine ? 13 : 10;
51100- struct bin_attribute *res_attr;
51101+ bin_attribute_no_const *res_attr;
51102 int retval;
51103
51104 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
51105@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
51106 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
51107 {
51108 int retval;
51109- struct bin_attribute *attr;
51110+ bin_attribute_no_const *attr;
51111
51112 /* If the device has VPD, try to expose it in sysfs. */
51113 if (dev->vpd) {
51114@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
51115 {
51116 int retval;
51117 int rom_size = 0;
51118- struct bin_attribute *attr;
51119+ bin_attribute_no_const *attr;
51120
51121 if (!sysfs_initialized)
51122 return -EACCES;
51123diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
51124index 4091f82..7d98eef 100644
51125--- a/drivers/pci/pci.h
51126+++ b/drivers/pci/pci.h
51127@@ -99,7 +99,7 @@ struct pci_vpd_ops {
51128 struct pci_vpd {
51129 unsigned int len;
51130 const struct pci_vpd_ops *ops;
51131- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
51132+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
51133 };
51134
51135 int pci_vpd_pci22_init(struct pci_dev *dev);
51136diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
51137index 820740a..8b1c673 100644
51138--- a/drivers/pci/pcie/aspm.c
51139+++ b/drivers/pci/pcie/aspm.c
51140@@ -27,9 +27,9 @@
51141 #define MODULE_PARAM_PREFIX "pcie_aspm."
51142
51143 /* Note: those are not register definitions */
51144-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
51145-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
51146-#define ASPM_STATE_L1 (4) /* L1 state */
51147+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
51148+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
51149+#define ASPM_STATE_L1 (4U) /* L1 state */
51150 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
51151 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
51152
51153diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
51154index be35da2..ec16cdb 100644
51155--- a/drivers/pci/pcie/portdrv_pci.c
51156+++ b/drivers/pci/pcie/portdrv_pci.c
51157@@ -324,7 +324,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
51158 return 0;
51159 }
51160
51161-static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
51162+static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
51163 /*
51164 * Boxes that should not use MSI for PCIe PME signaling.
51165 */
51166diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
51167index 8d2f400..c97cc91 100644
51168--- a/drivers/pci/probe.c
51169+++ b/drivers/pci/probe.c
51170@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
51171 u16 orig_cmd;
51172 struct pci_bus_region region, inverted_region;
51173
51174- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
51175+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
51176
51177 /* No printks while decoding is disabled! */
51178 if (!dev->mmio_always_on) {
51179diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
51180index 3f155e7..0f4b1f0 100644
51181--- a/drivers/pci/proc.c
51182+++ b/drivers/pci/proc.c
51183@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
51184 static int __init pci_proc_init(void)
51185 {
51186 struct pci_dev *dev = NULL;
51187+
51188+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51189+#ifdef CONFIG_GRKERNSEC_PROC_USER
51190+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
51191+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51192+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51193+#endif
51194+#else
51195 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
51196+#endif
51197 proc_create("devices", 0, proc_bus_pci_dir,
51198 &proc_bus_pci_dev_operations);
51199 proc_initialized = 1;
51200diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
51201index 3474920..acc9581 100644
51202--- a/drivers/platform/chrome/chromeos_pstore.c
51203+++ b/drivers/platform/chrome/chromeos_pstore.c
51204@@ -13,7 +13,7 @@
51205 #include <linux/platform_device.h>
51206 #include <linux/pstore_ram.h>
51207
51208-static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
51209+static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
51210 {
51211 /*
51212 * Today all Chromebooks/boxes ship with Google_* as version and
51213diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
51214index 1e1e594..8fe59c5 100644
51215--- a/drivers/platform/x86/alienware-wmi.c
51216+++ b/drivers/platform/x86/alienware-wmi.c
51217@@ -150,7 +150,7 @@ struct wmax_led_args {
51218 } __packed;
51219
51220 static struct platform_device *platform_device;
51221-static struct device_attribute *zone_dev_attrs;
51222+static device_attribute_no_const *zone_dev_attrs;
51223 static struct attribute **zone_attrs;
51224 static struct platform_zone *zone_data;
51225
51226@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
51227 }
51228 };
51229
51230-static struct attribute_group zone_attribute_group = {
51231+static attribute_group_no_const zone_attribute_group = {
51232 .name = "rgb_zones",
51233 };
51234
51235diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51236index 7543a56..367ca8ed 100644
51237--- a/drivers/platform/x86/asus-wmi.c
51238+++ b/drivers/platform/x86/asus-wmi.c
51239@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
51240 int err;
51241 u32 retval = -1;
51242
51243+#ifdef CONFIG_GRKERNSEC_KMEM
51244+ return -EPERM;
51245+#endif
51246+
51247 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51248
51249 if (err < 0)
51250@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
51251 int err;
51252 u32 retval = -1;
51253
51254+#ifdef CONFIG_GRKERNSEC_KMEM
51255+ return -EPERM;
51256+#endif
51257+
51258 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51259 &retval);
51260
51261@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
51262 union acpi_object *obj;
51263 acpi_status status;
51264
51265+#ifdef CONFIG_GRKERNSEC_KMEM
51266+ return -EPERM;
51267+#endif
51268+
51269 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51270 1, asus->debug.method_id,
51271 &input, &output);
51272diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
51273index bceb30b..bf063d4 100644
51274--- a/drivers/platform/x86/compal-laptop.c
51275+++ b/drivers/platform/x86/compal-laptop.c
51276@@ -766,7 +766,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
51277 return 1;
51278 }
51279
51280-static struct dmi_system_id __initdata compal_dmi_table[] = {
51281+static const struct dmi_system_id __initconst compal_dmi_table[] = {
51282 {
51283 .ident = "FL90/IFL90",
51284 .matches = {
51285diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
51286index 458e6c9..089aee7 100644
51287--- a/drivers/platform/x86/hdaps.c
51288+++ b/drivers/platform/x86/hdaps.c
51289@@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
51290 "ThinkPad T42p", so the order of the entries matters.
51291 If your ThinkPad is not recognized, please update to latest
51292 BIOS. This is especially the case for some R52 ThinkPads. */
51293-static struct dmi_system_id __initdata hdaps_whitelist[] = {
51294+static const struct dmi_system_id __initconst hdaps_whitelist[] = {
51295 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
51296 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
51297 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
51298diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
51299index 97c2be1..2ee50ce 100644
51300--- a/drivers/platform/x86/ibm_rtl.c
51301+++ b/drivers/platform/x86/ibm_rtl.c
51302@@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) {
51303 }
51304
51305
51306-static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
51307+static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
51308 { \
51309 .matches = { \
51310 DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \
51311diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
51312index a4a4258..a58a04c 100644
51313--- a/drivers/platform/x86/intel_oaktrail.c
51314+++ b/drivers/platform/x86/intel_oaktrail.c
51315@@ -298,7 +298,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
51316 return 0;
51317 }
51318
51319-static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
51320+static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
51321 {
51322 .ident = "OakTrail platform",
51323 .matches = {
51324diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51325index 0859877..59d596d 100644
51326--- a/drivers/platform/x86/msi-laptop.c
51327+++ b/drivers/platform/x86/msi-laptop.c
51328@@ -604,7 +604,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
51329 return 1;
51330 }
51331
51332-static struct dmi_system_id __initdata msi_dmi_table[] = {
51333+static const struct dmi_system_id __initconst msi_dmi_table[] = {
51334 {
51335 .ident = "MSI S270",
51336 .matches = {
51337@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51338
51339 if (!quirks->ec_read_only) {
51340 /* allow userland write sysfs file */
51341- dev_attr_bluetooth.store = store_bluetooth;
51342- dev_attr_wlan.store = store_wlan;
51343- dev_attr_threeg.store = store_threeg;
51344- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51345- dev_attr_wlan.attr.mode |= S_IWUSR;
51346- dev_attr_threeg.attr.mode |= S_IWUSR;
51347+ pax_open_kernel();
51348+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51349+ *(void **)&dev_attr_wlan.store = store_wlan;
51350+ *(void **)&dev_attr_threeg.store = store_threeg;
51351+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51352+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51353+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51354+ pax_close_kernel();
51355 }
51356
51357 /* disable hardware control by fn key */
51358diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51359index 6d2bac0..ec2b029 100644
51360--- a/drivers/platform/x86/msi-wmi.c
51361+++ b/drivers/platform/x86/msi-wmi.c
51362@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51363 static void msi_wmi_notify(u32 value, void *context)
51364 {
51365 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51366- static struct key_entry *key;
51367+ struct key_entry *key;
51368 union acpi_object *obj;
51369 acpi_status status;
51370
51371diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
51372index 9e701b2..c68a7b5 100644
51373--- a/drivers/platform/x86/samsung-laptop.c
51374+++ b/drivers/platform/x86/samsung-laptop.c
51375@@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d)
51376 return 0;
51377 }
51378
51379-static struct dmi_system_id __initdata samsung_dmi_table[] = {
51380+static const struct dmi_system_id __initconst samsung_dmi_table[] = {
51381 {
51382 .matches = {
51383 DMI_MATCH(DMI_SYS_VENDOR,
51384diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
51385index e6aac72..e11ff24 100644
51386--- a/drivers/platform/x86/samsung-q10.c
51387+++ b/drivers/platform/x86/samsung-q10.c
51388@@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
51389 return 1;
51390 }
51391
51392-static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
51393+static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
51394 {
51395 .ident = "Samsung Q10",
51396 .matches = {
51397diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51398index e51c1e7..71bb385 100644
51399--- a/drivers/platform/x86/sony-laptop.c
51400+++ b/drivers/platform/x86/sony-laptop.c
51401@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51402 }
51403
51404 /* High speed charging function */
51405-static struct device_attribute *hsc_handle;
51406+static device_attribute_no_const *hsc_handle;
51407
51408 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51409 struct device_attribute *attr,
51410@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51411 }
51412
51413 /* low battery function */
51414-static struct device_attribute *lowbatt_handle;
51415+static device_attribute_no_const *lowbatt_handle;
51416
51417 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51418 struct device_attribute *attr,
51419@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51420 }
51421
51422 /* fan speed function */
51423-static struct device_attribute *fan_handle, *hsf_handle;
51424+static device_attribute_no_const *fan_handle, *hsf_handle;
51425
51426 static ssize_t sony_nc_hsfan_store(struct device *dev,
51427 struct device_attribute *attr,
51428@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51429 }
51430
51431 /* USB charge function */
51432-static struct device_attribute *uc_handle;
51433+static device_attribute_no_const *uc_handle;
51434
51435 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51436 struct device_attribute *attr,
51437@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51438 }
51439
51440 /* Panel ID function */
51441-static struct device_attribute *panel_handle;
51442+static device_attribute_no_const *panel_handle;
51443
51444 static ssize_t sony_nc_panelid_show(struct device *dev,
51445 struct device_attribute *attr, char *buffer)
51446@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51447 }
51448
51449 /* smart connect function */
51450-static struct device_attribute *sc_handle;
51451+static device_attribute_no_const *sc_handle;
51452
51453 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51454 struct device_attribute *attr,
51455@@ -4854,7 +4854,7 @@ static struct acpi_driver sony_pic_driver = {
51456 .drv.pm = &sony_pic_pm,
51457 };
51458
51459-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
51460+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
51461 {
51462 .ident = "Sony Vaio",
51463 .matches = {
51464diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51465index 3b8ceee..e18652c 100644
51466--- a/drivers/platform/x86/thinkpad_acpi.c
51467+++ b/drivers/platform/x86/thinkpad_acpi.c
51468@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
51469 return 0;
51470 }
51471
51472-void static hotkey_mask_warn_incomplete_mask(void)
51473+static void hotkey_mask_warn_incomplete_mask(void)
51474 {
51475 /* log only what the user can fix... */
51476 const u32 wantedmask = hotkey_driver_mask &
51477@@ -2437,10 +2437,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51478 && !tp_features.bright_unkfw)
51479 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51480 }
51481+}
51482
51483 #undef TPACPI_COMPARE_KEY
51484 #undef TPACPI_MAY_SEND_KEY
51485-}
51486
51487 /*
51488 * Polling driver
51489diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51490index 438d4c7..ca8a2fb 100644
51491--- a/drivers/pnp/pnpbios/bioscalls.c
51492+++ b/drivers/pnp/pnpbios/bioscalls.c
51493@@ -59,7 +59,7 @@ do { \
51494 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51495 } while(0)
51496
51497-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51498+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51499 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51500
51501 /*
51502@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51503
51504 cpu = get_cpu();
51505 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51506+
51507+ pax_open_kernel();
51508 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51509+ pax_close_kernel();
51510
51511 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51512 spin_lock_irqsave(&pnp_bios_lock, flags);
51513@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51514 :"memory");
51515 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51516
51517+ pax_open_kernel();
51518 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51519+ pax_close_kernel();
51520+
51521 put_cpu();
51522
51523 /* If we get here and this is set then the PnP BIOS faulted on us. */
51524@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51525 return status;
51526 }
51527
51528-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51529+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51530 {
51531 int i;
51532
51533@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51534 pnp_bios_callpoint.offset = header->fields.pm16offset;
51535 pnp_bios_callpoint.segment = PNP_CS16;
51536
51537+ pax_open_kernel();
51538+
51539 for_each_possible_cpu(i) {
51540 struct desc_struct *gdt = get_cpu_gdt_table(i);
51541 if (!gdt)
51542@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51543 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51544 (unsigned long)__va(header->fields.pm16dseg));
51545 }
51546+
51547+ pax_close_kernel();
51548 }
51549diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
51550index facd43b..b291260 100644
51551--- a/drivers/pnp/pnpbios/core.c
51552+++ b/drivers/pnp/pnpbios/core.c
51553@@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
51554 return 0;
51555 }
51556
51557-static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
51558+static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
51559 { /* PnPBIOS GPF on boot */
51560 .callback = exploding_pnp_bios,
51561 .ident = "Higraded P14H",
51562diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51563index 0c52e2a..3421ab7 100644
51564--- a/drivers/power/pda_power.c
51565+++ b/drivers/power/pda_power.c
51566@@ -37,7 +37,11 @@ static int polling;
51567
51568 #if IS_ENABLED(CONFIG_USB_PHY)
51569 static struct usb_phy *transceiver;
51570-static struct notifier_block otg_nb;
51571+static int otg_handle_notification(struct notifier_block *nb,
51572+ unsigned long event, void *unused);
51573+static struct notifier_block otg_nb = {
51574+ .notifier_call = otg_handle_notification
51575+};
51576 #endif
51577
51578 static struct regulator *ac_draw;
51579@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51580
51581 #if IS_ENABLED(CONFIG_USB_PHY)
51582 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51583- otg_nb.notifier_call = otg_handle_notification;
51584 ret = usb_register_notifier(transceiver, &otg_nb);
51585 if (ret) {
51586 dev_err(dev, "failure to register otg notifier\n");
51587diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51588index cc439fd..8fa30df 100644
51589--- a/drivers/power/power_supply.h
51590+++ b/drivers/power/power_supply.h
51591@@ -16,12 +16,12 @@ struct power_supply;
51592
51593 #ifdef CONFIG_SYSFS
51594
51595-extern void power_supply_init_attrs(struct device_type *dev_type);
51596+extern void power_supply_init_attrs(void);
51597 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51598
51599 #else
51600
51601-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51602+static inline void power_supply_init_attrs(void) {}
51603 #define power_supply_uevent NULL
51604
51605 #endif /* CONFIG_SYSFS */
51606diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51607index 694e8cd..9f03483 100644
51608--- a/drivers/power/power_supply_core.c
51609+++ b/drivers/power/power_supply_core.c
51610@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51611 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51612 EXPORT_SYMBOL_GPL(power_supply_notifier);
51613
51614-static struct device_type power_supply_dev_type;
51615+extern const struct attribute_group *power_supply_attr_groups[];
51616+static struct device_type power_supply_dev_type = {
51617+ .groups = power_supply_attr_groups,
51618+};
51619
51620 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51621 struct power_supply *supply)
51622@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
51623 return PTR_ERR(power_supply_class);
51624
51625 power_supply_class->dev_uevent = power_supply_uevent;
51626- power_supply_init_attrs(&power_supply_dev_type);
51627+ power_supply_init_attrs();
51628
51629 return 0;
51630 }
51631diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51632index 62653f5..d0bb485 100644
51633--- a/drivers/power/power_supply_sysfs.c
51634+++ b/drivers/power/power_supply_sysfs.c
51635@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
51636 .is_visible = power_supply_attr_is_visible,
51637 };
51638
51639-static const struct attribute_group *power_supply_attr_groups[] = {
51640+const struct attribute_group *power_supply_attr_groups[] = {
51641 &power_supply_attr_group,
51642 NULL,
51643 };
51644
51645-void power_supply_init_attrs(struct device_type *dev_type)
51646+void power_supply_init_attrs(void)
51647 {
51648 int i;
51649
51650- dev_type->groups = power_supply_attr_groups;
51651-
51652 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51653 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51654 }
51655diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51656index 84419af..268ede8 100644
51657--- a/drivers/powercap/powercap_sys.c
51658+++ b/drivers/powercap/powercap_sys.c
51659@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51660 struct device_attribute name_attr;
51661 };
51662
51663+static ssize_t show_constraint_name(struct device *dev,
51664+ struct device_attribute *dev_attr,
51665+ char *buf);
51666+
51667 static struct powercap_constraint_attr
51668- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51669+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51670+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51671+ .power_limit_attr = {
51672+ .attr = {
51673+ .name = NULL,
51674+ .mode = S_IWUSR | S_IRUGO
51675+ },
51676+ .show = show_constraint_power_limit_uw,
51677+ .store = store_constraint_power_limit_uw
51678+ },
51679+
51680+ .time_window_attr = {
51681+ .attr = {
51682+ .name = NULL,
51683+ .mode = S_IWUSR | S_IRUGO
51684+ },
51685+ .show = show_constraint_time_window_us,
51686+ .store = store_constraint_time_window_us
51687+ },
51688+
51689+ .max_power_attr = {
51690+ .attr = {
51691+ .name = NULL,
51692+ .mode = S_IRUGO
51693+ },
51694+ .show = show_constraint_max_power_uw,
51695+ .store = NULL
51696+ },
51697+
51698+ .min_power_attr = {
51699+ .attr = {
51700+ .name = NULL,
51701+ .mode = S_IRUGO
51702+ },
51703+ .show = show_constraint_min_power_uw,
51704+ .store = NULL
51705+ },
51706+
51707+ .max_time_window_attr = {
51708+ .attr = {
51709+ .name = NULL,
51710+ .mode = S_IRUGO
51711+ },
51712+ .show = show_constraint_max_time_window_us,
51713+ .store = NULL
51714+ },
51715+
51716+ .min_time_window_attr = {
51717+ .attr = {
51718+ .name = NULL,
51719+ .mode = S_IRUGO
51720+ },
51721+ .show = show_constraint_min_time_window_us,
51722+ .store = NULL
51723+ },
51724+
51725+ .name_attr = {
51726+ .attr = {
51727+ .name = NULL,
51728+ .mode = S_IRUGO
51729+ },
51730+ .show = show_constraint_name,
51731+ .store = NULL
51732+ }
51733+ }
51734+};
51735
51736 /* A list of powercap control_types */
51737 static LIST_HEAD(powercap_cntrl_list);
51738@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51739 }
51740
51741 static int create_constraint_attribute(int id, const char *name,
51742- int mode,
51743- struct device_attribute *dev_attr,
51744- ssize_t (*show)(struct device *,
51745- struct device_attribute *, char *),
51746- ssize_t (*store)(struct device *,
51747- struct device_attribute *,
51748- const char *, size_t)
51749- )
51750+ struct device_attribute *dev_attr)
51751 {
51752+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51753
51754- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51755- id, name);
51756- if (!dev_attr->attr.name)
51757+ if (!name)
51758 return -ENOMEM;
51759- dev_attr->attr.mode = mode;
51760- dev_attr->show = show;
51761- dev_attr->store = store;
51762+
51763+ pax_open_kernel();
51764+ *(const char **)&dev_attr->attr.name = name;
51765+ pax_close_kernel();
51766
51767 return 0;
51768 }
51769@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51770
51771 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51772 ret = create_constraint_attribute(i, "power_limit_uw",
51773- S_IWUSR | S_IRUGO,
51774- &constraint_attrs[i].power_limit_attr,
51775- show_constraint_power_limit_uw,
51776- store_constraint_power_limit_uw);
51777+ &constraint_attrs[i].power_limit_attr);
51778 if (ret)
51779 goto err_alloc;
51780 ret = create_constraint_attribute(i, "time_window_us",
51781- S_IWUSR | S_IRUGO,
51782- &constraint_attrs[i].time_window_attr,
51783- show_constraint_time_window_us,
51784- store_constraint_time_window_us);
51785+ &constraint_attrs[i].time_window_attr);
51786 if (ret)
51787 goto err_alloc;
51788- ret = create_constraint_attribute(i, "name", S_IRUGO,
51789- &constraint_attrs[i].name_attr,
51790- show_constraint_name,
51791- NULL);
51792+ ret = create_constraint_attribute(i, "name",
51793+ &constraint_attrs[i].name_attr);
51794 if (ret)
51795 goto err_alloc;
51796- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51797- &constraint_attrs[i].max_power_attr,
51798- show_constraint_max_power_uw,
51799- NULL);
51800+ ret = create_constraint_attribute(i, "max_power_uw",
51801+ &constraint_attrs[i].max_power_attr);
51802 if (ret)
51803 goto err_alloc;
51804- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51805- &constraint_attrs[i].min_power_attr,
51806- show_constraint_min_power_uw,
51807- NULL);
51808+ ret = create_constraint_attribute(i, "min_power_uw",
51809+ &constraint_attrs[i].min_power_attr);
51810 if (ret)
51811 goto err_alloc;
51812 ret = create_constraint_attribute(i, "max_time_window_us",
51813- S_IRUGO,
51814- &constraint_attrs[i].max_time_window_attr,
51815- show_constraint_max_time_window_us,
51816- NULL);
51817+ &constraint_attrs[i].max_time_window_attr);
51818 if (ret)
51819 goto err_alloc;
51820 ret = create_constraint_attribute(i, "min_time_window_us",
51821- S_IRUGO,
51822- &constraint_attrs[i].min_time_window_attr,
51823- show_constraint_min_time_window_us,
51824- NULL);
51825+ &constraint_attrs[i].min_time_window_attr);
51826 if (ret)
51827 goto err_alloc;
51828
51829@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
51830 power_zone->zone_dev_attrs[count++] =
51831 &dev_attr_max_energy_range_uj.attr;
51832 if (power_zone->ops->get_energy_uj) {
51833+ pax_open_kernel();
51834 if (power_zone->ops->reset_energy_uj)
51835- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51836+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51837 else
51838- dev_attr_energy_uj.attr.mode = S_IRUGO;
51839+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
51840+ pax_close_kernel();
51841 power_zone->zone_dev_attrs[count++] =
51842 &dev_attr_energy_uj.attr;
51843 }
51844diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
51845index 9c5d414..c7900ce 100644
51846--- a/drivers/ptp/ptp_private.h
51847+++ b/drivers/ptp/ptp_private.h
51848@@ -51,7 +51,7 @@ struct ptp_clock {
51849 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
51850 wait_queue_head_t tsev_wq;
51851 int defunct; /* tells readers to go away when clock is being removed */
51852- struct device_attribute *pin_dev_attr;
51853+ device_attribute_no_const *pin_dev_attr;
51854 struct attribute **pin_attr;
51855 struct attribute_group pin_attr_group;
51856 };
51857diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
51858index 302e626..12579af 100644
51859--- a/drivers/ptp/ptp_sysfs.c
51860+++ b/drivers/ptp/ptp_sysfs.c
51861@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
51862 goto no_pin_attr;
51863
51864 for (i = 0; i < n_pins; i++) {
51865- struct device_attribute *da = &ptp->pin_dev_attr[i];
51866+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
51867 sysfs_attr_init(&da->attr);
51868 da->attr.name = info->pin_config[i].name;
51869 da->attr.mode = 0644;
51870diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
51871index a4a8a6d..a3456f4 100644
51872--- a/drivers/regulator/core.c
51873+++ b/drivers/regulator/core.c
51874@@ -3529,7 +3529,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51875 const struct regulation_constraints *constraints = NULL;
51876 const struct regulator_init_data *init_data;
51877 struct regulator_config *config = NULL;
51878- static atomic_t regulator_no = ATOMIC_INIT(-1);
51879+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1);
51880 struct regulator_dev *rdev;
51881 struct device *dev;
51882 int ret, i;
51883@@ -3613,7 +3613,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51884 rdev->dev.class = &regulator_class;
51885 rdev->dev.parent = dev;
51886 dev_set_name(&rdev->dev, "regulator.%lu",
51887- (unsigned long) atomic_inc_return(&regulator_no));
51888+ (unsigned long) atomic_inc_return_unchecked(&regulator_no));
51889 ret = device_register(&rdev->dev);
51890 if (ret != 0) {
51891 put_device(&rdev->dev);
51892diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
51893index 7eee2ca..4024513 100644
51894--- a/drivers/regulator/max8660.c
51895+++ b/drivers/regulator/max8660.c
51896@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
51897 max8660->shadow_regs[MAX8660_OVER1] = 5;
51898 } else {
51899 /* Otherwise devices can be toggled via software */
51900- max8660_dcdc_ops.enable = max8660_dcdc_enable;
51901- max8660_dcdc_ops.disable = max8660_dcdc_disable;
51902+ pax_open_kernel();
51903+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
51904+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
51905+ pax_close_kernel();
51906 }
51907
51908 /*
51909diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
51910index c3d55c2..0dddfe6 100644
51911--- a/drivers/regulator/max8973-regulator.c
51912+++ b/drivers/regulator/max8973-regulator.c
51913@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
51914 if (!pdata || !pdata->enable_ext_control) {
51915 max->desc.enable_reg = MAX8973_VOUT;
51916 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
51917- max->ops.enable = regulator_enable_regmap;
51918- max->ops.disable = regulator_disable_regmap;
51919- max->ops.is_enabled = regulator_is_enabled_regmap;
51920+ pax_open_kernel();
51921+ *(void **)&max->ops.enable = regulator_enable_regmap;
51922+ *(void **)&max->ops.disable = regulator_disable_regmap;
51923+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
51924+ pax_close_kernel();
51925 }
51926
51927 if (pdata) {
51928diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
51929index 0d17c92..a29f627 100644
51930--- a/drivers/regulator/mc13892-regulator.c
51931+++ b/drivers/regulator/mc13892-regulator.c
51932@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
51933 mc13xxx_unlock(mc13892);
51934
51935 /* update mc13892_vcam ops */
51936- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51937+ pax_open_kernel();
51938+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51939 sizeof(struct regulator_ops));
51940- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51941- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51942+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51943+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51944+ pax_close_kernel();
51945 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
51946
51947 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
51948diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51949index 5b2e761..c8c8a4a 100644
51950--- a/drivers/rtc/rtc-cmos.c
51951+++ b/drivers/rtc/rtc-cmos.c
51952@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51953 hpet_rtc_timer_init();
51954
51955 /* export at least the first block of NVRAM */
51956- nvram.size = address_space - NVRAM_OFFSET;
51957+ pax_open_kernel();
51958+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51959+ pax_close_kernel();
51960 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51961 if (retval < 0) {
51962 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51963diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51964index 799c34b..8e9786a 100644
51965--- a/drivers/rtc/rtc-dev.c
51966+++ b/drivers/rtc/rtc-dev.c
51967@@ -16,6 +16,7 @@
51968 #include <linux/module.h>
51969 #include <linux/rtc.h>
51970 #include <linux/sched.h>
51971+#include <linux/grsecurity.h>
51972 #include "rtc-core.h"
51973
51974 static dev_t rtc_devt;
51975@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51976 if (copy_from_user(&tm, uarg, sizeof(tm)))
51977 return -EFAULT;
51978
51979+ gr_log_timechange();
51980+
51981 return rtc_set_time(rtc, &tm);
51982
51983 case RTC_PIE_ON:
51984diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51985index 4ffabb3..1f87fca 100644
51986--- a/drivers/rtc/rtc-ds1307.c
51987+++ b/drivers/rtc/rtc-ds1307.c
51988@@ -107,7 +107,7 @@ struct ds1307 {
51989 u8 offset; /* register's offset */
51990 u8 regs[11];
51991 u16 nvram_offset;
51992- struct bin_attribute *nvram;
51993+ bin_attribute_no_const *nvram;
51994 enum ds_type type;
51995 unsigned long flags;
51996 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
51997diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
51998index 90abb5b..e0bf6dd 100644
51999--- a/drivers/rtc/rtc-m48t59.c
52000+++ b/drivers/rtc/rtc-m48t59.c
52001@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
52002 if (IS_ERR(m48t59->rtc))
52003 return PTR_ERR(m48t59->rtc);
52004
52005- m48t59_nvram_attr.size = pdata->offset;
52006+ pax_open_kernel();
52007+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
52008+ pax_close_kernel();
52009
52010 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
52011 if (ret)
52012diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
52013index e693af6..2e525b6 100644
52014--- a/drivers/scsi/bfa/bfa_fcpim.h
52015+++ b/drivers/scsi/bfa/bfa_fcpim.h
52016@@ -36,7 +36,7 @@ struct bfa_iotag_s {
52017
52018 struct bfa_itn_s {
52019 bfa_isr_func_t isr;
52020-};
52021+} __no_const;
52022
52023 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
52024 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
52025diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
52026index 0f19455..ef7adb5 100644
52027--- a/drivers/scsi/bfa/bfa_fcs.c
52028+++ b/drivers/scsi/bfa/bfa_fcs.c
52029@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
52030 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
52031
52032 static struct bfa_fcs_mod_s fcs_modules[] = {
52033- { bfa_fcs_port_attach, NULL, NULL },
52034- { bfa_fcs_uf_attach, NULL, NULL },
52035- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
52036- bfa_fcs_fabric_modexit },
52037+ {
52038+ .attach = bfa_fcs_port_attach,
52039+ .modinit = NULL,
52040+ .modexit = NULL
52041+ },
52042+ {
52043+ .attach = bfa_fcs_uf_attach,
52044+ .modinit = NULL,
52045+ .modexit = NULL
52046+ },
52047+ {
52048+ .attach = bfa_fcs_fabric_attach,
52049+ .modinit = bfa_fcs_fabric_modinit,
52050+ .modexit = bfa_fcs_fabric_modexit
52051+ },
52052 };
52053
52054 /*
52055diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
52056index ff75ef8..2dfe00a 100644
52057--- a/drivers/scsi/bfa/bfa_fcs_lport.c
52058+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
52059@@ -89,15 +89,26 @@ static struct {
52060 void (*offline) (struct bfa_fcs_lport_s *port);
52061 } __port_action[] = {
52062 {
52063- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
52064- bfa_fcs_lport_unknown_offline}, {
52065- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
52066- bfa_fcs_lport_fab_offline}, {
52067- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
52068- bfa_fcs_lport_n2n_offline}, {
52069- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
52070- bfa_fcs_lport_loop_offline},
52071- };
52072+ .init = bfa_fcs_lport_unknown_init,
52073+ .online = bfa_fcs_lport_unknown_online,
52074+ .offline = bfa_fcs_lport_unknown_offline
52075+ },
52076+ {
52077+ .init = bfa_fcs_lport_fab_init,
52078+ .online = bfa_fcs_lport_fab_online,
52079+ .offline = bfa_fcs_lport_fab_offline
52080+ },
52081+ {
52082+ .init = bfa_fcs_lport_n2n_init,
52083+ .online = bfa_fcs_lport_n2n_online,
52084+ .offline = bfa_fcs_lport_n2n_offline
52085+ },
52086+ {
52087+ .init = bfa_fcs_lport_loop_init,
52088+ .online = bfa_fcs_lport_loop_online,
52089+ .offline = bfa_fcs_lport_loop_offline
52090+ },
52091+};
52092
52093 /*
52094 * fcs_port_sm FCS logical port state machine
52095diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
52096index a38aafa0..fe8f03b 100644
52097--- a/drivers/scsi/bfa/bfa_ioc.h
52098+++ b/drivers/scsi/bfa/bfa_ioc.h
52099@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
52100 bfa_ioc_disable_cbfn_t disable_cbfn;
52101 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
52102 bfa_ioc_reset_cbfn_t reset_cbfn;
52103-};
52104+} __no_const;
52105
52106 /*
52107 * IOC event notification mechanism.
52108@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
52109 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
52110 enum bfi_ioc_state fwstate);
52111 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
52112-};
52113+} __no_const;
52114
52115 /*
52116 * Queue element to wait for room in request queue. FIFO order is
52117diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
52118index a14c784..6de6790 100644
52119--- a/drivers/scsi/bfa/bfa_modules.h
52120+++ b/drivers/scsi/bfa/bfa_modules.h
52121@@ -78,12 +78,12 @@ enum {
52122 \
52123 extern struct bfa_module_s hal_mod_ ## __mod; \
52124 struct bfa_module_s hal_mod_ ## __mod = { \
52125- bfa_ ## __mod ## _meminfo, \
52126- bfa_ ## __mod ## _attach, \
52127- bfa_ ## __mod ## _detach, \
52128- bfa_ ## __mod ## _start, \
52129- bfa_ ## __mod ## _stop, \
52130- bfa_ ## __mod ## _iocdisable, \
52131+ .meminfo = bfa_ ## __mod ## _meminfo, \
52132+ .attach = bfa_ ## __mod ## _attach, \
52133+ .detach = bfa_ ## __mod ## _detach, \
52134+ .start = bfa_ ## __mod ## _start, \
52135+ .stop = bfa_ ## __mod ## _stop, \
52136+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
52137 }
52138
52139 #define BFA_CACHELINE_SZ (256)
52140diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
52141index 045c4e1..13de803 100644
52142--- a/drivers/scsi/fcoe/fcoe_sysfs.c
52143+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
52144@@ -33,8 +33,8 @@
52145 */
52146 #include "libfcoe.h"
52147
52148-static atomic_t ctlr_num;
52149-static atomic_t fcf_num;
52150+static atomic_unchecked_t ctlr_num;
52151+static atomic_unchecked_t fcf_num;
52152
52153 /*
52154 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
52155@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
52156 if (!ctlr)
52157 goto out;
52158
52159- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
52160+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
52161 ctlr->f = f;
52162 ctlr->mode = FIP_CONN_TYPE_FABRIC;
52163 INIT_LIST_HEAD(&ctlr->fcfs);
52164@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
52165 fcf->dev.parent = &ctlr->dev;
52166 fcf->dev.bus = &fcoe_bus_type;
52167 fcf->dev.type = &fcoe_fcf_device_type;
52168- fcf->id = atomic_inc_return(&fcf_num) - 1;
52169+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
52170 fcf->state = FCOE_FCF_STATE_UNKNOWN;
52171
52172 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
52173@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
52174 {
52175 int error;
52176
52177- atomic_set(&ctlr_num, 0);
52178- atomic_set(&fcf_num, 0);
52179+ atomic_set_unchecked(&ctlr_num, 0);
52180+ atomic_set_unchecked(&fcf_num, 0);
52181
52182 error = bus_register(&fcoe_bus_type);
52183 if (error)
52184diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
52185index 8bb173e..20236b4 100644
52186--- a/drivers/scsi/hosts.c
52187+++ b/drivers/scsi/hosts.c
52188@@ -42,7 +42,7 @@
52189 #include "scsi_logging.h"
52190
52191
52192-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52193+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52194
52195
52196 static void scsi_host_cls_release(struct device *dev)
52197@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
52198 * subtract one because we increment first then return, but we need to
52199 * know what the next host number was before increment
52200 */
52201- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
52202+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
52203 shost->dma_channel = 0xff;
52204
52205 /* These three are default values which can be overridden */
52206diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
52207index a1cfbd3..d7f8ebc 100644
52208--- a/drivers/scsi/hpsa.c
52209+++ b/drivers/scsi/hpsa.c
52210@@ -697,10 +697,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
52211 struct reply_queue_buffer *rq = &h->reply_queue[q];
52212
52213 if (h->transMethod & CFGTBL_Trans_io_accel1)
52214- return h->access.command_completed(h, q);
52215+ return h->access->command_completed(h, q);
52216
52217 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
52218- return h->access.command_completed(h, q);
52219+ return h->access->command_completed(h, q);
52220
52221 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
52222 a = rq->head[rq->current_entry];
52223@@ -837,7 +837,7 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
52224 break;
52225 default:
52226 set_performant_mode(h, c);
52227- h->access.submit_command(h, c);
52228+ h->access->submit_command(h, c);
52229 }
52230 }
52231
52232@@ -5369,17 +5369,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
52233
52234 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52235 {
52236- return h->access.command_completed(h, q);
52237+ return h->access->command_completed(h, q);
52238 }
52239
52240 static inline bool interrupt_pending(struct ctlr_info *h)
52241 {
52242- return h->access.intr_pending(h);
52243+ return h->access->intr_pending(h);
52244 }
52245
52246 static inline long interrupt_not_for_us(struct ctlr_info *h)
52247 {
52248- return (h->access.intr_pending(h) == 0) ||
52249+ return (h->access->intr_pending(h) == 0) ||
52250 (h->interrupts_enabled == 0);
52251 }
52252
52253@@ -6270,7 +6270,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52254 if (prod_index < 0)
52255 return prod_index;
52256 h->product_name = products[prod_index].product_name;
52257- h->access = *(products[prod_index].access);
52258+ h->access = products[prod_index].access;
52259
52260 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52261 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52262@@ -6649,7 +6649,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52263 unsigned long flags;
52264 u32 lockup_detected;
52265
52266- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52267+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52268 spin_lock_irqsave(&h->lock, flags);
52269 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52270 if (!lockup_detected) {
52271@@ -6924,7 +6924,7 @@ reinit_after_soft_reset:
52272 }
52273
52274 /* make sure the board interrupts are off */
52275- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52276+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52277
52278 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52279 goto clean2;
52280@@ -6960,7 +6960,7 @@ reinit_after_soft_reset:
52281 * fake ones to scoop up any residual completions.
52282 */
52283 spin_lock_irqsave(&h->lock, flags);
52284- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52285+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52286 spin_unlock_irqrestore(&h->lock, flags);
52287 hpsa_free_irqs(h);
52288 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
52289@@ -6979,9 +6979,9 @@ reinit_after_soft_reset:
52290 dev_info(&h->pdev->dev, "Board READY.\n");
52291 dev_info(&h->pdev->dev,
52292 "Waiting for stale completions to drain.\n");
52293- h->access.set_intr_mask(h, HPSA_INTR_ON);
52294+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52295 msleep(10000);
52296- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52297+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52298
52299 rc = controller_reset_failed(h->cfgtable);
52300 if (rc)
52301@@ -7006,7 +7006,7 @@ reinit_after_soft_reset:
52302
52303
52304 /* Turn the interrupts on so we can service requests */
52305- h->access.set_intr_mask(h, HPSA_INTR_ON);
52306+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52307
52308 hpsa_hba_inquiry(h);
52309 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52310@@ -7079,7 +7079,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52311 * To write all data in the battery backed cache to disks
52312 */
52313 hpsa_flush_cache(h);
52314- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52315+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52316 hpsa_free_irqs_and_disable_msix(h);
52317 }
52318
52319@@ -7200,7 +7200,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52320 CFGTBL_Trans_enable_directed_msix |
52321 (trans_support & (CFGTBL_Trans_io_accel1 |
52322 CFGTBL_Trans_io_accel2));
52323- struct access_method access = SA5_performant_access;
52324+ struct access_method *access = &SA5_performant_access;
52325
52326 /* This is a bit complicated. There are 8 registers on
52327 * the controller which we write to to tell it 8 different
52328@@ -7242,7 +7242,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52329 * perform the superfluous readl() after each command submission.
52330 */
52331 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52332- access = SA5_performant_access_no_read;
52333+ access = &SA5_performant_access_no_read;
52334
52335 /* Controller spec: zero out this buffer. */
52336 for (i = 0; i < h->nreply_queues; i++)
52337@@ -7272,12 +7272,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52338 * enable outbound interrupt coalescing in accelerator mode;
52339 */
52340 if (trans_support & CFGTBL_Trans_io_accel1) {
52341- access = SA5_ioaccel_mode1_access;
52342+ access = &SA5_ioaccel_mode1_access;
52343 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52344 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52345 } else {
52346 if (trans_support & CFGTBL_Trans_io_accel2) {
52347- access = SA5_ioaccel_mode2_access;
52348+ access = &SA5_ioaccel_mode2_access;
52349 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52350 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52351 }
52352diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52353index 6577130..955f9a4 100644
52354--- a/drivers/scsi/hpsa.h
52355+++ b/drivers/scsi/hpsa.h
52356@@ -143,7 +143,7 @@ struct ctlr_info {
52357 unsigned int msix_vector;
52358 unsigned int msi_vector;
52359 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52360- struct access_method access;
52361+ struct access_method *access;
52362 char hba_mode_enabled;
52363
52364 /* queue and queue Info */
52365@@ -525,38 +525,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52366 }
52367
52368 static struct access_method SA5_access = {
52369- SA5_submit_command,
52370- SA5_intr_mask,
52371- SA5_intr_pending,
52372- SA5_completed,
52373+ .submit_command = SA5_submit_command,
52374+ .set_intr_mask = SA5_intr_mask,
52375+ .intr_pending = SA5_intr_pending,
52376+ .command_completed = SA5_completed,
52377 };
52378
52379 static struct access_method SA5_ioaccel_mode1_access = {
52380- SA5_submit_command,
52381- SA5_performant_intr_mask,
52382- SA5_ioaccel_mode1_intr_pending,
52383- SA5_ioaccel_mode1_completed,
52384+ .submit_command = SA5_submit_command,
52385+ .set_intr_mask = SA5_performant_intr_mask,
52386+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52387+ .command_completed = SA5_ioaccel_mode1_completed,
52388 };
52389
52390 static struct access_method SA5_ioaccel_mode2_access = {
52391- SA5_submit_command_ioaccel2,
52392- SA5_performant_intr_mask,
52393- SA5_performant_intr_pending,
52394- SA5_performant_completed,
52395+ .submit_command = SA5_submit_command_ioaccel2,
52396+ .set_intr_mask = SA5_performant_intr_mask,
52397+ .intr_pending = SA5_performant_intr_pending,
52398+ .command_completed = SA5_performant_completed,
52399 };
52400
52401 static struct access_method SA5_performant_access = {
52402- SA5_submit_command,
52403- SA5_performant_intr_mask,
52404- SA5_performant_intr_pending,
52405- SA5_performant_completed,
52406+ .submit_command = SA5_submit_command,
52407+ .set_intr_mask = SA5_performant_intr_mask,
52408+ .intr_pending = SA5_performant_intr_pending,
52409+ .command_completed = SA5_performant_completed,
52410 };
52411
52412 static struct access_method SA5_performant_access_no_read = {
52413- SA5_submit_command_no_read,
52414- SA5_performant_intr_mask,
52415- SA5_performant_intr_pending,
52416- SA5_performant_completed,
52417+ .submit_command = SA5_submit_command_no_read,
52418+ .set_intr_mask = SA5_performant_intr_mask,
52419+ .intr_pending = SA5_performant_intr_pending,
52420+ .command_completed = SA5_performant_completed,
52421 };
52422
52423 struct board_type {
52424diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52425index 1b3a094..068e683 100644
52426--- a/drivers/scsi/libfc/fc_exch.c
52427+++ b/drivers/scsi/libfc/fc_exch.c
52428@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52429 u16 pool_max_index;
52430
52431 struct {
52432- atomic_t no_free_exch;
52433- atomic_t no_free_exch_xid;
52434- atomic_t xid_not_found;
52435- atomic_t xid_busy;
52436- atomic_t seq_not_found;
52437- atomic_t non_bls_resp;
52438+ atomic_unchecked_t no_free_exch;
52439+ atomic_unchecked_t no_free_exch_xid;
52440+ atomic_unchecked_t xid_not_found;
52441+ atomic_unchecked_t xid_busy;
52442+ atomic_unchecked_t seq_not_found;
52443+ atomic_unchecked_t non_bls_resp;
52444 } stats;
52445 };
52446
52447@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52448 /* allocate memory for exchange */
52449 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52450 if (!ep) {
52451- atomic_inc(&mp->stats.no_free_exch);
52452+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52453 goto out;
52454 }
52455 memset(ep, 0, sizeof(*ep));
52456@@ -874,7 +874,7 @@ out:
52457 return ep;
52458 err:
52459 spin_unlock_bh(&pool->lock);
52460- atomic_inc(&mp->stats.no_free_exch_xid);
52461+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52462 mempool_free(ep, mp->ep_pool);
52463 return NULL;
52464 }
52465@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52466 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52467 ep = fc_exch_find(mp, xid);
52468 if (!ep) {
52469- atomic_inc(&mp->stats.xid_not_found);
52470+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52471 reject = FC_RJT_OX_ID;
52472 goto out;
52473 }
52474@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52475 ep = fc_exch_find(mp, xid);
52476 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52477 if (ep) {
52478- atomic_inc(&mp->stats.xid_busy);
52479+ atomic_inc_unchecked(&mp->stats.xid_busy);
52480 reject = FC_RJT_RX_ID;
52481 goto rel;
52482 }
52483@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52484 }
52485 xid = ep->xid; /* get our XID */
52486 } else if (!ep) {
52487- atomic_inc(&mp->stats.xid_not_found);
52488+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52489 reject = FC_RJT_RX_ID; /* XID not found */
52490 goto out;
52491 }
52492@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52493 } else {
52494 sp = &ep->seq;
52495 if (sp->id != fh->fh_seq_id) {
52496- atomic_inc(&mp->stats.seq_not_found);
52497+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52498 if (f_ctl & FC_FC_END_SEQ) {
52499 /*
52500 * Update sequence_id based on incoming last
52501@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52502
52503 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52504 if (!ep) {
52505- atomic_inc(&mp->stats.xid_not_found);
52506+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52507 goto out;
52508 }
52509 if (ep->esb_stat & ESB_ST_COMPLETE) {
52510- atomic_inc(&mp->stats.xid_not_found);
52511+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52512 goto rel;
52513 }
52514 if (ep->rxid == FC_XID_UNKNOWN)
52515 ep->rxid = ntohs(fh->fh_rx_id);
52516 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52517- atomic_inc(&mp->stats.xid_not_found);
52518+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52519 goto rel;
52520 }
52521 if (ep->did != ntoh24(fh->fh_s_id) &&
52522 ep->did != FC_FID_FLOGI) {
52523- atomic_inc(&mp->stats.xid_not_found);
52524+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52525 goto rel;
52526 }
52527 sof = fr_sof(fp);
52528@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52529 sp->ssb_stat |= SSB_ST_RESP;
52530 sp->id = fh->fh_seq_id;
52531 } else if (sp->id != fh->fh_seq_id) {
52532- atomic_inc(&mp->stats.seq_not_found);
52533+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52534 goto rel;
52535 }
52536
52537@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52538 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52539
52540 if (!sp)
52541- atomic_inc(&mp->stats.xid_not_found);
52542+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52543 else
52544- atomic_inc(&mp->stats.non_bls_resp);
52545+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52546
52547 fc_frame_free(fp);
52548 }
52549@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52550
52551 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52552 mp = ema->mp;
52553- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52554+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52555 st->fc_no_free_exch_xid +=
52556- atomic_read(&mp->stats.no_free_exch_xid);
52557- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52558- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52559- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52560- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52561+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52562+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52563+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52564+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52565+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52566 }
52567 }
52568 EXPORT_SYMBOL(fc_exch_update_stats);
52569diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52570index 9c706d8..d3e3ed2 100644
52571--- a/drivers/scsi/libsas/sas_ata.c
52572+++ b/drivers/scsi/libsas/sas_ata.c
52573@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
52574 .postreset = ata_std_postreset,
52575 .error_handler = ata_std_error_handler,
52576 .post_internal_cmd = sas_ata_post_internal,
52577- .qc_defer = ata_std_qc_defer,
52578+ .qc_defer = ata_std_qc_defer,
52579 .qc_prep = ata_noop_qc_prep,
52580 .qc_issue = sas_ata_qc_issue,
52581 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52582diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52583index 434e903..5a4a79b 100644
52584--- a/drivers/scsi/lpfc/lpfc.h
52585+++ b/drivers/scsi/lpfc/lpfc.h
52586@@ -430,7 +430,7 @@ struct lpfc_vport {
52587 struct dentry *debug_nodelist;
52588 struct dentry *vport_debugfs_root;
52589 struct lpfc_debugfs_trc *disc_trc;
52590- atomic_t disc_trc_cnt;
52591+ atomic_unchecked_t disc_trc_cnt;
52592 #endif
52593 uint8_t stat_data_enabled;
52594 uint8_t stat_data_blocked;
52595@@ -880,8 +880,8 @@ struct lpfc_hba {
52596 struct timer_list fabric_block_timer;
52597 unsigned long bit_flags;
52598 #define FABRIC_COMANDS_BLOCKED 0
52599- atomic_t num_rsrc_err;
52600- atomic_t num_cmd_success;
52601+ atomic_unchecked_t num_rsrc_err;
52602+ atomic_unchecked_t num_cmd_success;
52603 unsigned long last_rsrc_error_time;
52604 unsigned long last_ramp_down_time;
52605 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52606@@ -916,7 +916,7 @@ struct lpfc_hba {
52607
52608 struct dentry *debug_slow_ring_trc;
52609 struct lpfc_debugfs_trc *slow_ring_trc;
52610- atomic_t slow_ring_trc_cnt;
52611+ atomic_unchecked_t slow_ring_trc_cnt;
52612 /* iDiag debugfs sub-directory */
52613 struct dentry *idiag_root;
52614 struct dentry *idiag_pci_cfg;
52615diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52616index 5633e7d..8272114 100644
52617--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52618+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52619@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52620
52621 #include <linux/debugfs.h>
52622
52623-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52624+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52625 static unsigned long lpfc_debugfs_start_time = 0L;
52626
52627 /* iDiag */
52628@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52629 lpfc_debugfs_enable = 0;
52630
52631 len = 0;
52632- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52633+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52634 (lpfc_debugfs_max_disc_trc - 1);
52635 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52636 dtp = vport->disc_trc + i;
52637@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52638 lpfc_debugfs_enable = 0;
52639
52640 len = 0;
52641- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52642+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52643 (lpfc_debugfs_max_slow_ring_trc - 1);
52644 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52645 dtp = phba->slow_ring_trc + i;
52646@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52647 !vport || !vport->disc_trc)
52648 return;
52649
52650- index = atomic_inc_return(&vport->disc_trc_cnt) &
52651+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52652 (lpfc_debugfs_max_disc_trc - 1);
52653 dtp = vport->disc_trc + index;
52654 dtp->fmt = fmt;
52655 dtp->data1 = data1;
52656 dtp->data2 = data2;
52657 dtp->data3 = data3;
52658- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52659+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52660 dtp->jif = jiffies;
52661 #endif
52662 return;
52663@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52664 !phba || !phba->slow_ring_trc)
52665 return;
52666
52667- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52668+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52669 (lpfc_debugfs_max_slow_ring_trc - 1);
52670 dtp = phba->slow_ring_trc + index;
52671 dtp->fmt = fmt;
52672 dtp->data1 = data1;
52673 dtp->data2 = data2;
52674 dtp->data3 = data3;
52675- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52676+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52677 dtp->jif = jiffies;
52678 #endif
52679 return;
52680@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52681 "slow_ring buffer\n");
52682 goto debug_failed;
52683 }
52684- atomic_set(&phba->slow_ring_trc_cnt, 0);
52685+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52686 memset(phba->slow_ring_trc, 0,
52687 (sizeof(struct lpfc_debugfs_trc) *
52688 lpfc_debugfs_max_slow_ring_trc));
52689@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52690 "buffer\n");
52691 goto debug_failed;
52692 }
52693- atomic_set(&vport->disc_trc_cnt, 0);
52694+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52695
52696 snprintf(name, sizeof(name), "discovery_trace");
52697 vport->debug_disc_trc =
52698diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52699index 0b2c53a..aec2b45 100644
52700--- a/drivers/scsi/lpfc/lpfc_init.c
52701+++ b/drivers/scsi/lpfc/lpfc_init.c
52702@@ -11290,8 +11290,10 @@ lpfc_init(void)
52703 "misc_register returned with status %d", error);
52704
52705 if (lpfc_enable_npiv) {
52706- lpfc_transport_functions.vport_create = lpfc_vport_create;
52707- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52708+ pax_open_kernel();
52709+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52710+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52711+ pax_close_kernel();
52712 }
52713 lpfc_transport_template =
52714 fc_attach_transport(&lpfc_transport_functions);
52715diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52716index 4f9222e..f1850e3 100644
52717--- a/drivers/scsi/lpfc/lpfc_scsi.c
52718+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52719@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52720 unsigned long expires;
52721
52722 spin_lock_irqsave(&phba->hbalock, flags);
52723- atomic_inc(&phba->num_rsrc_err);
52724+ atomic_inc_unchecked(&phba->num_rsrc_err);
52725 phba->last_rsrc_error_time = jiffies;
52726
52727 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
52728@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52729 unsigned long num_rsrc_err, num_cmd_success;
52730 int i;
52731
52732- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52733- num_cmd_success = atomic_read(&phba->num_cmd_success);
52734+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52735+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52736
52737 /*
52738 * The error and success command counters are global per
52739@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52740 }
52741 }
52742 lpfc_destroy_vport_work_array(phba, vports);
52743- atomic_set(&phba->num_rsrc_err, 0);
52744- atomic_set(&phba->num_cmd_success, 0);
52745+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52746+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52747 }
52748
52749 /**
52750diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52751index 3f26147..ee8efd1 100644
52752--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52753+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52754@@ -1509,7 +1509,7 @@ _scsih_get_resync(struct device *dev)
52755 {
52756 struct scsi_device *sdev = to_scsi_device(dev);
52757 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52758- static struct _raid_device *raid_device;
52759+ struct _raid_device *raid_device;
52760 unsigned long flags;
52761 Mpi2RaidVolPage0_t vol_pg0;
52762 Mpi2ConfigReply_t mpi_reply;
52763@@ -1561,7 +1561,7 @@ _scsih_get_state(struct device *dev)
52764 {
52765 struct scsi_device *sdev = to_scsi_device(dev);
52766 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52767- static struct _raid_device *raid_device;
52768+ struct _raid_device *raid_device;
52769 unsigned long flags;
52770 Mpi2RaidVolPage0_t vol_pg0;
52771 Mpi2ConfigReply_t mpi_reply;
52772@@ -6641,7 +6641,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52773 Mpi2EventDataIrOperationStatus_t *event_data =
52774 (Mpi2EventDataIrOperationStatus_t *)
52775 fw_event->event_data;
52776- static struct _raid_device *raid_device;
52777+ struct _raid_device *raid_device;
52778 unsigned long flags;
52779 u16 handle;
52780
52781@@ -7112,7 +7112,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
52782 u64 sas_address;
52783 struct _sas_device *sas_device;
52784 struct _sas_node *expander_device;
52785- static struct _raid_device *raid_device;
52786+ struct _raid_device *raid_device;
52787 u8 retry_count;
52788 unsigned long flags;
52789
52790diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
52791index ed31d8c..ab856b3 100644
52792--- a/drivers/scsi/pmcraid.c
52793+++ b/drivers/scsi/pmcraid.c
52794@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
52795 res->scsi_dev = scsi_dev;
52796 scsi_dev->hostdata = res;
52797 res->change_detected = 0;
52798- atomic_set(&res->read_failures, 0);
52799- atomic_set(&res->write_failures, 0);
52800+ atomic_set_unchecked(&res->read_failures, 0);
52801+ atomic_set_unchecked(&res->write_failures, 0);
52802 rc = 0;
52803 }
52804 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
52805@@ -2640,9 +2640,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
52806
52807 /* If this was a SCSI read/write command keep count of errors */
52808 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
52809- atomic_inc(&res->read_failures);
52810+ atomic_inc_unchecked(&res->read_failures);
52811 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
52812- atomic_inc(&res->write_failures);
52813+ atomic_inc_unchecked(&res->write_failures);
52814
52815 if (!RES_IS_GSCSI(res->cfg_entry) &&
52816 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
52817@@ -3468,7 +3468,7 @@ static int pmcraid_queuecommand_lck(
52818 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52819 * hrrq_id assigned here in queuecommand
52820 */
52821- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52822+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52823 pinstance->num_hrrq;
52824 cmd->cmd_done = pmcraid_io_done;
52825
52826@@ -3782,7 +3782,7 @@ static long pmcraid_ioctl_passthrough(
52827 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52828 * hrrq_id assigned here in queuecommand
52829 */
52830- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52831+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52832 pinstance->num_hrrq;
52833
52834 if (request_size) {
52835@@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
52836
52837 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
52838 /* add resources only after host is added into system */
52839- if (!atomic_read(&pinstance->expose_resources))
52840+ if (!atomic_read_unchecked(&pinstance->expose_resources))
52841 return;
52842
52843 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
52844@@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
52845 init_waitqueue_head(&pinstance->reset_wait_q);
52846
52847 atomic_set(&pinstance->outstanding_cmds, 0);
52848- atomic_set(&pinstance->last_message_id, 0);
52849- atomic_set(&pinstance->expose_resources, 0);
52850+ atomic_set_unchecked(&pinstance->last_message_id, 0);
52851+ atomic_set_unchecked(&pinstance->expose_resources, 0);
52852
52853 INIT_LIST_HEAD(&pinstance->free_res_q);
52854 INIT_LIST_HEAD(&pinstance->used_res_q);
52855@@ -5951,7 +5951,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
52856 /* Schedule worker thread to handle CCN and take care of adding and
52857 * removing devices to OS
52858 */
52859- atomic_set(&pinstance->expose_resources, 1);
52860+ atomic_set_unchecked(&pinstance->expose_resources, 1);
52861 schedule_work(&pinstance->worker_q);
52862 return rc;
52863
52864diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
52865index e1d150f..6c6df44 100644
52866--- a/drivers/scsi/pmcraid.h
52867+++ b/drivers/scsi/pmcraid.h
52868@@ -748,7 +748,7 @@ struct pmcraid_instance {
52869 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
52870
52871 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
52872- atomic_t last_message_id;
52873+ atomic_unchecked_t last_message_id;
52874
52875 /* configuration table */
52876 struct pmcraid_config_table *cfg_table;
52877@@ -777,7 +777,7 @@ struct pmcraid_instance {
52878 atomic_t outstanding_cmds;
52879
52880 /* should add/delete resources to mid-layer now ?*/
52881- atomic_t expose_resources;
52882+ atomic_unchecked_t expose_resources;
52883
52884
52885
52886@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
52887 struct pmcraid_config_table_entry_ext cfg_entry_ext;
52888 };
52889 struct scsi_device *scsi_dev; /* Link scsi_device structure */
52890- atomic_t read_failures; /* count of failed READ commands */
52891- atomic_t write_failures; /* count of failed WRITE commands */
52892+ atomic_unchecked_t read_failures; /* count of failed READ commands */
52893+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
52894
52895 /* To indicate add/delete/modify during CCN */
52896 u8 change_detected;
52897diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
52898index 82b92c4..3178171 100644
52899--- a/drivers/scsi/qla2xxx/qla_attr.c
52900+++ b/drivers/scsi/qla2xxx/qla_attr.c
52901@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
52902 return 0;
52903 }
52904
52905-struct fc_function_template qla2xxx_transport_functions = {
52906+fc_function_template_no_const qla2xxx_transport_functions = {
52907
52908 .show_host_node_name = 1,
52909 .show_host_port_name = 1,
52910@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
52911 .bsg_timeout = qla24xx_bsg_timeout,
52912 };
52913
52914-struct fc_function_template qla2xxx_transport_vport_functions = {
52915+fc_function_template_no_const qla2xxx_transport_vport_functions = {
52916
52917 .show_host_node_name = 1,
52918 .show_host_port_name = 1,
52919diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
52920index 7686bfe..4710893 100644
52921--- a/drivers/scsi/qla2xxx/qla_gbl.h
52922+++ b/drivers/scsi/qla2xxx/qla_gbl.h
52923@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
52924 struct device_attribute;
52925 extern struct device_attribute *qla2x00_host_attrs[];
52926 struct fc_function_template;
52927-extern struct fc_function_template qla2xxx_transport_functions;
52928-extern struct fc_function_template qla2xxx_transport_vport_functions;
52929+extern fc_function_template_no_const qla2xxx_transport_functions;
52930+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52931 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52932 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52933 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52934diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52935index cce1cbc..5b9f0fe 100644
52936--- a/drivers/scsi/qla2xxx/qla_os.c
52937+++ b/drivers/scsi/qla2xxx/qla_os.c
52938@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52939 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52940 /* Ok, a 64bit DMA mask is applicable. */
52941 ha->flags.enable_64bit_addressing = 1;
52942- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52943- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52944+ pax_open_kernel();
52945+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52946+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52947+ pax_close_kernel();
52948 return;
52949 }
52950 }
52951diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52952index 8f6d0fb..1b21097 100644
52953--- a/drivers/scsi/qla4xxx/ql4_def.h
52954+++ b/drivers/scsi/qla4xxx/ql4_def.h
52955@@ -305,7 +305,7 @@ struct ddb_entry {
52956 * (4000 only) */
52957 atomic_t relogin_timer; /* Max Time to wait for
52958 * relogin to complete */
52959- atomic_t relogin_retry_count; /* Num of times relogin has been
52960+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52961 * retried */
52962 uint32_t default_time2wait; /* Default Min time between
52963 * relogins (+aens) */
52964diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52965index 6d25879..3031a9f 100644
52966--- a/drivers/scsi/qla4xxx/ql4_os.c
52967+++ b/drivers/scsi/qla4xxx/ql4_os.c
52968@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52969 */
52970 if (!iscsi_is_session_online(cls_sess)) {
52971 /* Reset retry relogin timer */
52972- atomic_inc(&ddb_entry->relogin_retry_count);
52973+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52974 DEBUG2(ql4_printk(KERN_INFO, ha,
52975 "%s: index[%d] relogin timed out-retrying"
52976 " relogin (%d), retry (%d)\n", __func__,
52977 ddb_entry->fw_ddb_index,
52978- atomic_read(&ddb_entry->relogin_retry_count),
52979+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52980 ddb_entry->default_time2wait + 4));
52981 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52982 atomic_set(&ddb_entry->retry_relogin_timer,
52983@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52984
52985 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52986 atomic_set(&ddb_entry->relogin_timer, 0);
52987- atomic_set(&ddb_entry->relogin_retry_count, 0);
52988+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
52989 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
52990 ddb_entry->default_relogin_timeout =
52991 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
52992diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
52993index b1a2631..5bcd9c8 100644
52994--- a/drivers/scsi/scsi_lib.c
52995+++ b/drivers/scsi/scsi_lib.c
52996@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
52997 shost = sdev->host;
52998 scsi_init_cmd_errh(cmd);
52999 cmd->result = DID_NO_CONNECT << 16;
53000- atomic_inc(&cmd->device->iorequest_cnt);
53001+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53002
53003 /*
53004 * SCSI request completion path will do scsi_device_unbusy(),
53005@@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
53006
53007 INIT_LIST_HEAD(&cmd->eh_entry);
53008
53009- atomic_inc(&cmd->device->iodone_cnt);
53010+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
53011 if (cmd->result)
53012- atomic_inc(&cmd->device->ioerr_cnt);
53013+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
53014
53015 disposition = scsi_decide_disposition(cmd);
53016 if (disposition != SUCCESS &&
53017@@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
53018 struct Scsi_Host *host = cmd->device->host;
53019 int rtn = 0;
53020
53021- atomic_inc(&cmd->device->iorequest_cnt);
53022+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53023
53024 /* check if the device is still usable */
53025 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
53026diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
53027index 1ac38e7..6acc656 100644
53028--- a/drivers/scsi/scsi_sysfs.c
53029+++ b/drivers/scsi/scsi_sysfs.c
53030@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
53031 char *buf) \
53032 { \
53033 struct scsi_device *sdev = to_scsi_device(dev); \
53034- unsigned long long count = atomic_read(&sdev->field); \
53035+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
53036 return snprintf(buf, 20, "0x%llx\n", count); \
53037 } \
53038 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
53039diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
53040index 5d6f348..18778a6b 100644
53041--- a/drivers/scsi/scsi_transport_fc.c
53042+++ b/drivers/scsi/scsi_transport_fc.c
53043@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
53044 * Netlink Infrastructure
53045 */
53046
53047-static atomic_t fc_event_seq;
53048+static atomic_unchecked_t fc_event_seq;
53049
53050 /**
53051 * fc_get_event_number - Obtain the next sequential FC event number
53052@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
53053 u32
53054 fc_get_event_number(void)
53055 {
53056- return atomic_add_return(1, &fc_event_seq);
53057+ return atomic_add_return_unchecked(1, &fc_event_seq);
53058 }
53059 EXPORT_SYMBOL(fc_get_event_number);
53060
53061@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
53062 {
53063 int error;
53064
53065- atomic_set(&fc_event_seq, 0);
53066+ atomic_set_unchecked(&fc_event_seq, 0);
53067
53068 error = transport_class_register(&fc_host_class);
53069 if (error)
53070@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
53071 char *cp;
53072
53073 *val = simple_strtoul(buf, &cp, 0);
53074- if ((*cp && (*cp != '\n')) || (*val < 0))
53075+ if (*cp && (*cp != '\n'))
53076 return -EINVAL;
53077 /*
53078 * Check for overflow; dev_loss_tmo is u32
53079diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
53080index 67d43e3..8cee73c 100644
53081--- a/drivers/scsi/scsi_transport_iscsi.c
53082+++ b/drivers/scsi/scsi_transport_iscsi.c
53083@@ -79,7 +79,7 @@ struct iscsi_internal {
53084 struct transport_container session_cont;
53085 };
53086
53087-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
53088+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
53089 static struct workqueue_struct *iscsi_eh_timer_workq;
53090
53091 static DEFINE_IDA(iscsi_sess_ida);
53092@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
53093 int err;
53094
53095 ihost = shost->shost_data;
53096- session->sid = atomic_add_return(1, &iscsi_session_nr);
53097+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
53098
53099 if (target_id == ISCSI_MAX_TARGET) {
53100 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
53101@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
53102 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
53103 ISCSI_TRANSPORT_VERSION);
53104
53105- atomic_set(&iscsi_session_nr, 0);
53106+ atomic_set_unchecked(&iscsi_session_nr, 0);
53107
53108 err = class_register(&iscsi_transport_class);
53109 if (err)
53110diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
53111index ae45bd9..c32a586 100644
53112--- a/drivers/scsi/scsi_transport_srp.c
53113+++ b/drivers/scsi/scsi_transport_srp.c
53114@@ -35,7 +35,7 @@
53115 #include "scsi_priv.h"
53116
53117 struct srp_host_attrs {
53118- atomic_t next_port_id;
53119+ atomic_unchecked_t next_port_id;
53120 };
53121 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
53122
53123@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
53124 struct Scsi_Host *shost = dev_to_shost(dev);
53125 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
53126
53127- atomic_set(&srp_host->next_port_id, 0);
53128+ atomic_set_unchecked(&srp_host->next_port_id, 0);
53129 return 0;
53130 }
53131
53132@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
53133 rport_fast_io_fail_timedout);
53134 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
53135
53136- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
53137+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
53138 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
53139
53140 transport_setup_device(&rport->dev);
53141diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
53142index 3290a3e..d65ac1c 100644
53143--- a/drivers/scsi/sd.c
53144+++ b/drivers/scsi/sd.c
53145@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
53146 sdkp->disk = gd;
53147 sdkp->index = index;
53148 atomic_set(&sdkp->openers, 0);
53149- atomic_set(&sdkp->device->ioerr_cnt, 0);
53150+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
53151
53152 if (!sdp->request_queue->rq_timeout) {
53153 if (sdp->type != TYPE_MOD)
53154diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
53155index 2270bd5..98408a5 100644
53156--- a/drivers/scsi/sg.c
53157+++ b/drivers/scsi/sg.c
53158@@ -1083,7 +1083,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
53159 sdp->disk->disk_name,
53160 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
53161 NULL,
53162- (char *)arg);
53163+ (char __user *)arg);
53164 case BLKTRACESTART:
53165 return blk_trace_startstop(sdp->device->request_queue, 1);
53166 case BLKTRACESTOP:
53167diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
53168index c0d660f..24a5854 100644
53169--- a/drivers/soc/tegra/fuse/fuse-tegra.c
53170+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
53171@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
53172 return i;
53173 }
53174
53175-static struct bin_attribute fuse_bin_attr = {
53176+static bin_attribute_no_const fuse_bin_attr = {
53177 .attr = { .name = "fuse", .mode = S_IRUGO, },
53178 .read = fuse_read,
53179 };
53180diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
53181index 57a1950..ae54e21 100644
53182--- a/drivers/spi/spi.c
53183+++ b/drivers/spi/spi.c
53184@@ -2307,7 +2307,7 @@ int spi_bus_unlock(struct spi_master *master)
53185 EXPORT_SYMBOL_GPL(spi_bus_unlock);
53186
53187 /* portable code must never pass more than 32 bytes */
53188-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
53189+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
53190
53191 static u8 *buf;
53192
53193diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
53194index b41429f..2de5373 100644
53195--- a/drivers/staging/android/timed_output.c
53196+++ b/drivers/staging/android/timed_output.c
53197@@ -25,7 +25,7 @@
53198 #include "timed_output.h"
53199
53200 static struct class *timed_output_class;
53201-static atomic_t device_count;
53202+static atomic_unchecked_t device_count;
53203
53204 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
53205 char *buf)
53206@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
53207 timed_output_class = class_create(THIS_MODULE, "timed_output");
53208 if (IS_ERR(timed_output_class))
53209 return PTR_ERR(timed_output_class);
53210- atomic_set(&device_count, 0);
53211+ atomic_set_unchecked(&device_count, 0);
53212 timed_output_class->dev_groups = timed_output_groups;
53213 }
53214
53215@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
53216 if (ret < 0)
53217 return ret;
53218
53219- tdev->index = atomic_inc_return(&device_count);
53220+ tdev->index = atomic_inc_return_unchecked(&device_count);
53221 tdev->dev = device_create(timed_output_class, NULL,
53222 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
53223 if (IS_ERR(tdev->dev))
53224diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
53225index 727640e..55bf61c 100644
53226--- a/drivers/staging/comedi/comedi_fops.c
53227+++ b/drivers/staging/comedi/comedi_fops.c
53228@@ -297,8 +297,8 @@ static void comedi_file_reset(struct file *file)
53229 }
53230 cfp->last_attached = dev->attached;
53231 cfp->last_detach_count = dev->detach_count;
53232- ACCESS_ONCE(cfp->read_subdev) = read_s;
53233- ACCESS_ONCE(cfp->write_subdev) = write_s;
53234+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
53235+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
53236 }
53237
53238 static void comedi_file_check(struct file *file)
53239@@ -1924,7 +1924,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
53240 !(s_old->async->cmd.flags & CMDF_WRITE))
53241 return -EBUSY;
53242
53243- ACCESS_ONCE(cfp->read_subdev) = s_new;
53244+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
53245 return 0;
53246 }
53247
53248@@ -1966,7 +1966,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
53249 (s_old->async->cmd.flags & CMDF_WRITE))
53250 return -EBUSY;
53251
53252- ACCESS_ONCE(cfp->write_subdev) = s_new;
53253+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
53254 return 0;
53255 }
53256
53257diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
53258index 37dcf7e..f3c2016 100644
53259--- a/drivers/staging/fbtft/fbtft-core.c
53260+++ b/drivers/staging/fbtft/fbtft-core.c
53261@@ -689,7 +689,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
53262 {
53263 struct fb_info *info;
53264 struct fbtft_par *par;
53265- struct fb_ops *fbops = NULL;
53266+ fb_ops_no_const *fbops = NULL;
53267 struct fb_deferred_io *fbdefio = NULL;
53268 struct fbtft_platform_data *pdata = dev->platform_data;
53269 u8 *vmem = NULL;
53270diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
53271index 0dbf3f9..fed0063 100644
53272--- a/drivers/staging/fbtft/fbtft.h
53273+++ b/drivers/staging/fbtft/fbtft.h
53274@@ -106,7 +106,7 @@ struct fbtft_ops {
53275
53276 int (*set_var)(struct fbtft_par *par);
53277 int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
53278-};
53279+} __no_const;
53280
53281 /**
53282 * struct fbtft_display - Describes the display properties
53283diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53284index 001348c..cfaac8a 100644
53285--- a/drivers/staging/gdm724x/gdm_tty.c
53286+++ b/drivers/staging/gdm724x/gdm_tty.c
53287@@ -44,7 +44,7 @@
53288 #define gdm_tty_send_control(n, r, v, d, l) (\
53289 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53290
53291-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53292+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53293
53294 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53295 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53296diff --git a/drivers/staging/i2o/i2o.h b/drivers/staging/i2o/i2o.h
53297index d23c3c2..eb63c81 100644
53298--- a/drivers/staging/i2o/i2o.h
53299+++ b/drivers/staging/i2o/i2o.h
53300@@ -565,7 +565,7 @@ struct i2o_controller {
53301 struct i2o_device *exec; /* Executive */
53302 #if BITS_PER_LONG == 64
53303 spinlock_t context_list_lock; /* lock for context_list */
53304- atomic_t context_list_counter; /* needed for unique contexts */
53305+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53306 struct list_head context_list; /* list of context id's
53307 and pointers */
53308 #endif
53309diff --git a/drivers/staging/i2o/i2o_proc.c b/drivers/staging/i2o/i2o_proc.c
53310index ad84f33..c5bdf65 100644
53311--- a/drivers/staging/i2o/i2o_proc.c
53312+++ b/drivers/staging/i2o/i2o_proc.c
53313@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
53314 "Array Controller Device"
53315 };
53316
53317-static char *chtostr(char *tmp, u8 *chars, int n)
53318-{
53319- tmp[0] = 0;
53320- return strncat(tmp, (char *)chars, n);
53321-}
53322-
53323 static int i2o_report_query_status(struct seq_file *seq, int block_status,
53324 char *group)
53325 {
53326@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
53327 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
53328 {
53329 struct i2o_controller *c = (struct i2o_controller *)seq->private;
53330- static u32 work32[5];
53331- static u8 *work8 = (u8 *) work32;
53332- static u16 *work16 = (u16 *) work32;
53333+ u32 work32[5];
53334+ u8 *work8 = (u8 *) work32;
53335+ u16 *work16 = (u16 *) work32;
53336 int token;
53337 u32 hwcap;
53338
53339@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53340 } *result;
53341
53342 i2o_exec_execute_ddm_table ddm_table;
53343- char tmp[28 + 1];
53344
53345 result = kmalloc(sizeof(*result), GFP_KERNEL);
53346 if (!result)
53347@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53348
53349 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
53350 seq_printf(seq, "%-#8x", ddm_table.module_id);
53351- seq_printf(seq, "%-29s",
53352- chtostr(tmp, ddm_table.module_name_version, 28));
53353+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
53354 seq_printf(seq, "%9d ", ddm_table.data_size);
53355 seq_printf(seq, "%8d", ddm_table.code_size);
53356
53357@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53358
53359 i2o_driver_result_table *result;
53360 i2o_driver_store_table *dst;
53361- char tmp[28 + 1];
53362
53363 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
53364 if (result == NULL)
53365@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53366
53367 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
53368 seq_printf(seq, "%-#8x", dst->module_id);
53369- seq_printf(seq, "%-29s",
53370- chtostr(tmp, dst->module_name_version, 28));
53371- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
53372+ seq_printf(seq, "%-.28s", dst->module_name_version);
53373+ seq_printf(seq, "%-.8s", dst->date);
53374 seq_printf(seq, "%8d ", dst->module_size);
53375 seq_printf(seq, "%8d ", dst->mpb_size);
53376 seq_printf(seq, "0x%04x", dst->module_flags);
53377@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
53378 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53379 {
53380 struct i2o_device *d = (struct i2o_device *)seq->private;
53381- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53382+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53383 // == (allow) 512d bytes (max)
53384- static u16 *work16 = (u16 *) work32;
53385+ u16 *work16 = (u16 *) work32;
53386 int token;
53387- char tmp[16 + 1];
53388
53389 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
53390
53391@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53392 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
53393 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
53394 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
53395- seq_printf(seq, "Vendor info : %s\n",
53396- chtostr(tmp, (u8 *) (work32 + 2), 16));
53397- seq_printf(seq, "Product info : %s\n",
53398- chtostr(tmp, (u8 *) (work32 + 6), 16));
53399- seq_printf(seq, "Description : %s\n",
53400- chtostr(tmp, (u8 *) (work32 + 10), 16));
53401- seq_printf(seq, "Product rev. : %s\n",
53402- chtostr(tmp, (u8 *) (work32 + 14), 8));
53403+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
53404+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
53405+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
53406+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
53407
53408 seq_printf(seq, "Serial number : ");
53409 print_serial_number(seq, (u8 *) (work32 + 16),
53410@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53411 u8 pad[256]; // allow up to 256 byte (max) serial number
53412 } result;
53413
53414- char tmp[24 + 1];
53415-
53416 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
53417
53418 if (token < 0) {
53419@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53420 }
53421
53422 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
53423- seq_printf(seq, "Module name : %s\n",
53424- chtostr(tmp, result.module_name, 24));
53425- seq_printf(seq, "Module revision : %s\n",
53426- chtostr(tmp, result.module_rev, 8));
53427+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
53428+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
53429
53430 seq_printf(seq, "Serial number : ");
53431 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
53432@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53433 u8 instance_number[4];
53434 } result;
53435
53436- char tmp[64 + 1];
53437-
53438 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
53439
53440 if (token < 0) {
53441@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53442 return 0;
53443 }
53444
53445- seq_printf(seq, "Device name : %s\n",
53446- chtostr(tmp, result.device_name, 64));
53447- seq_printf(seq, "Service name : %s\n",
53448- chtostr(tmp, result.service_name, 64));
53449- seq_printf(seq, "Physical name : %s\n",
53450- chtostr(tmp, result.physical_location, 64));
53451- seq_printf(seq, "Instance number : %s\n",
53452- chtostr(tmp, result.instance_number, 4));
53453+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
53454+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
53455+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
53456+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
53457
53458 return 0;
53459 }
53460@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53461 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
53462 {
53463 struct i2o_device *d = (struct i2o_device *)seq->private;
53464- static u32 work32[12];
53465- static u16 *work16 = (u16 *) work32;
53466- static u8 *work8 = (u8 *) work32;
53467+ u32 work32[12];
53468+ u16 *work16 = (u16 *) work32;
53469+ u8 *work8 = (u8 *) work32;
53470 int token;
53471
53472 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
53473diff --git a/drivers/staging/i2o/iop.c b/drivers/staging/i2o/iop.c
53474index 52334fc..d7f40b3 100644
53475--- a/drivers/staging/i2o/iop.c
53476+++ b/drivers/staging/i2o/iop.c
53477@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
53478
53479 spin_lock_irqsave(&c->context_list_lock, flags);
53480
53481- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
53482- atomic_inc(&c->context_list_counter);
53483+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
53484+ atomic_inc_unchecked(&c->context_list_counter);
53485
53486- entry->context = atomic_read(&c->context_list_counter);
53487+ entry->context = atomic_read_unchecked(&c->context_list_counter);
53488
53489 list_add(&entry->list, &c->context_list);
53490
53491@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
53492
53493 #if BITS_PER_LONG == 64
53494 spin_lock_init(&c->context_list_lock);
53495- atomic_set(&c->context_list_counter, 0);
53496+ atomic_set_unchecked(&c->context_list_counter, 0);
53497 INIT_LIST_HEAD(&c->context_list);
53498 #endif
53499
53500diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53501index 463da07..e791ce9 100644
53502--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53503+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53504@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53505 return 0;
53506 }
53507
53508-sfw_test_client_ops_t brw_test_client;
53509-void brw_init_test_client(void)
53510-{
53511- brw_test_client.tso_init = brw_client_init;
53512- brw_test_client.tso_fini = brw_client_fini;
53513- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53514- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53515+sfw_test_client_ops_t brw_test_client = {
53516+ .tso_init = brw_client_init,
53517+ .tso_fini = brw_client_fini,
53518+ .tso_prep_rpc = brw_client_prep_rpc,
53519+ .tso_done_rpc = brw_client_done_rpc,
53520 };
53521
53522 srpc_service_t brw_test_service;
53523diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53524index 5709148..ccd9e0d 100644
53525--- a/drivers/staging/lustre/lnet/selftest/framework.c
53526+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53527@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
53528
53529 extern sfw_test_client_ops_t ping_test_client;
53530 extern srpc_service_t ping_test_service;
53531-extern void ping_init_test_client(void);
53532 extern void ping_init_test_service(void);
53533
53534 extern sfw_test_client_ops_t brw_test_client;
53535 extern srpc_service_t brw_test_service;
53536-extern void brw_init_test_client(void);
53537 extern void brw_init_test_service(void);
53538
53539
53540@@ -1675,12 +1673,10 @@ sfw_startup (void)
53541 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53542 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53543
53544- brw_init_test_client();
53545 brw_init_test_service();
53546 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53547 LASSERT (rc == 0);
53548
53549- ping_init_test_client();
53550 ping_init_test_service();
53551 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53552 LASSERT (rc == 0);
53553diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53554index d8c0df6..5041cbb 100644
53555--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53556+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53557@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53558 return 0;
53559 }
53560
53561-sfw_test_client_ops_t ping_test_client;
53562-void ping_init_test_client(void)
53563-{
53564- ping_test_client.tso_init = ping_client_init;
53565- ping_test_client.tso_fini = ping_client_fini;
53566- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53567- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53568-}
53569+sfw_test_client_ops_t ping_test_client = {
53570+ .tso_init = ping_client_init,
53571+ .tso_fini = ping_client_fini,
53572+ .tso_prep_rpc = ping_client_prep_rpc,
53573+ .tso_done_rpc = ping_client_done_rpc,
53574+};
53575
53576 srpc_service_t ping_test_service;
53577 void ping_init_test_service(void)
53578diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53579index 83bc0a9..12ba00a 100644
53580--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53581+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53582@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
53583 ldlm_completion_callback lcs_completion;
53584 ldlm_blocking_callback lcs_blocking;
53585 ldlm_glimpse_callback lcs_glimpse;
53586-};
53587+} __no_const;
53588
53589 /* ldlm_lockd.c */
53590 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53591diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53592index 2a88b80..62e7e5f 100644
53593--- a/drivers/staging/lustre/lustre/include/obd.h
53594+++ b/drivers/staging/lustre/lustre/include/obd.h
53595@@ -1362,7 +1362,7 @@ struct md_ops {
53596 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53597 * wrapper function in include/linux/obd_class.h.
53598 */
53599-};
53600+} __no_const;
53601
53602 struct lsm_operations {
53603 void (*lsm_free)(struct lov_stripe_md *);
53604diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53605index a4c252f..b21acac 100644
53606--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53607+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53608@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53609 int added = (mode == LCK_NL);
53610 int overlaps = 0;
53611 int splitted = 0;
53612- const struct ldlm_callback_suite null_cbs = { NULL };
53613+ const struct ldlm_callback_suite null_cbs = { };
53614
53615 CDEBUG(D_DLMTRACE,
53616 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
53617diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53618index c539e37..743b213 100644
53619--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53620+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53621@@ -237,7 +237,7 @@ static int proc_console_max_delay_cs(struct ctl_table *table, int write,
53622 loff_t *ppos)
53623 {
53624 int rc, max_delay_cs;
53625- struct ctl_table dummy = *table;
53626+ ctl_table_no_const dummy = *table;
53627 long d;
53628
53629 dummy.data = &max_delay_cs;
53630@@ -270,7 +270,7 @@ static int proc_console_min_delay_cs(struct ctl_table *table, int write,
53631 loff_t *ppos)
53632 {
53633 int rc, min_delay_cs;
53634- struct ctl_table dummy = *table;
53635+ ctl_table_no_const dummy = *table;
53636 long d;
53637
53638 dummy.data = &min_delay_cs;
53639@@ -302,7 +302,7 @@ static int proc_console_backoff(struct ctl_table *table, int write,
53640 void __user *buffer, size_t *lenp, loff_t *ppos)
53641 {
53642 int rc, backoff;
53643- struct ctl_table dummy = *table;
53644+ ctl_table_no_const dummy = *table;
53645
53646 dummy.data = &backoff;
53647 dummy.proc_handler = &proc_dointvec;
53648diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53649index 7dc77dd..289d03e 100644
53650--- a/drivers/staging/lustre/lustre/libcfs/module.c
53651+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53652@@ -313,11 +313,11 @@ out:
53653
53654
53655 struct cfs_psdev_ops libcfs_psdev_ops = {
53656- libcfs_psdev_open,
53657- libcfs_psdev_release,
53658- NULL,
53659- NULL,
53660- libcfs_ioctl
53661+ .p_open = libcfs_psdev_open,
53662+ .p_close = libcfs_psdev_release,
53663+ .p_read = NULL,
53664+ .p_write = NULL,
53665+ .p_ioctl = libcfs_ioctl
53666 };
53667
53668 extern int insert_proc(void);
53669diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53670index 22667db..8b703b6 100644
53671--- a/drivers/staging/octeon/ethernet-rx.c
53672+++ b/drivers/staging/octeon/ethernet-rx.c
53673@@ -354,14 +354,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53674 /* Increment RX stats for virtual ports */
53675 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53676 #ifdef CONFIG_64BIT
53677- atomic64_add(1,
53678+ atomic64_add_unchecked(1,
53679 (atomic64_t *)&priv->stats.rx_packets);
53680- atomic64_add(skb->len,
53681+ atomic64_add_unchecked(skb->len,
53682 (atomic64_t *)&priv->stats.rx_bytes);
53683 #else
53684- atomic_add(1,
53685+ atomic_add_unchecked(1,
53686 (atomic_t *)&priv->stats.rx_packets);
53687- atomic_add(skb->len,
53688+ atomic_add_unchecked(skb->len,
53689 (atomic_t *)&priv->stats.rx_bytes);
53690 #endif
53691 }
53692@@ -373,10 +373,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53693 dev->name);
53694 */
53695 #ifdef CONFIG_64BIT
53696- atomic64_add(1,
53697+ atomic64_add_unchecked(1,
53698 (atomic64_t *)&priv->stats.rx_dropped);
53699 #else
53700- atomic_add(1,
53701+ atomic_add_unchecked(1,
53702 (atomic_t *)&priv->stats.rx_dropped);
53703 #endif
53704 dev_kfree_skb_irq(skb);
53705diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53706index 460e854..f926452 100644
53707--- a/drivers/staging/octeon/ethernet.c
53708+++ b/drivers/staging/octeon/ethernet.c
53709@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53710 * since the RX tasklet also increments it.
53711 */
53712 #ifdef CONFIG_64BIT
53713- atomic64_add(rx_status.dropped_packets,
53714- (atomic64_t *)&priv->stats.rx_dropped);
53715+ atomic64_add_unchecked(rx_status.dropped_packets,
53716+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53717 #else
53718- atomic_add(rx_status.dropped_packets,
53719- (atomic_t *)&priv->stats.rx_dropped);
53720+ atomic_add_unchecked(rx_status.dropped_packets,
53721+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53722 #endif
53723 }
53724
53725diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53726index 3b476d8..f522d68 100644
53727--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53728+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53729@@ -225,7 +225,7 @@ struct hal_ops {
53730
53731 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
53732 void (*hal_reset_security_engine)(struct adapter *adapter);
53733-};
53734+} __no_const;
53735
53736 enum rt_eeprom_type {
53737 EEPROM_93C46,
53738diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53739index 070cc03..6806e37 100644
53740--- a/drivers/staging/rtl8712/rtl871x_io.h
53741+++ b/drivers/staging/rtl8712/rtl871x_io.h
53742@@ -108,7 +108,7 @@ struct _io_ops {
53743 u8 *pmem);
53744 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53745 u8 *pmem);
53746-};
53747+} __no_const;
53748
53749 struct io_req {
53750 struct list_head list;
53751diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
53752index 98f3ba4..c6a7fce 100644
53753--- a/drivers/staging/unisys/visorchipset/visorchipset.h
53754+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
53755@@ -171,7 +171,7 @@ struct visorchipset_busdev_notifiers {
53756 void (*device_resume)(ulong bus_no, ulong dev_no);
53757 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
53758 ulong *max_size);
53759-};
53760+} __no_const;
53761
53762 /* These functions live inside visorchipset, and will be called to indicate
53763 * responses to specific events (by code outside of visorchipset).
53764@@ -186,7 +186,7 @@ struct visorchipset_busdev_responders {
53765 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
53766 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
53767 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
53768-};
53769+} __no_const;
53770
53771 /** Register functions (in the bus driver) to get called by visorchipset
53772 * whenever a bus or device appears for which this service partition is
53773diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53774index 9512af6..045bf5a 100644
53775--- a/drivers/target/sbp/sbp_target.c
53776+++ b/drivers/target/sbp/sbp_target.c
53777@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53778
53779 #define SESSION_MAINTENANCE_INTERVAL HZ
53780
53781-static atomic_t login_id = ATOMIC_INIT(0);
53782+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53783
53784 static void session_maintenance_work(struct work_struct *);
53785 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53786@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53787 login->lun = se_lun;
53788 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53789 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53790- login->login_id = atomic_inc_return(&login_id);
53791+ login->login_id = atomic_inc_return_unchecked(&login_id);
53792
53793 login->tgt_agt = sbp_target_agent_register(login);
53794 if (IS_ERR(login->tgt_agt)) {
53795diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53796index 7faa6ae..ae6c410 100644
53797--- a/drivers/target/target_core_device.c
53798+++ b/drivers/target/target_core_device.c
53799@@ -1495,7 +1495,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53800 spin_lock_init(&dev->se_tmr_lock);
53801 spin_lock_init(&dev->qf_cmd_lock);
53802 sema_init(&dev->caw_sem, 1);
53803- atomic_set(&dev->dev_ordered_id, 0);
53804+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53805 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53806 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53807 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53808diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53809index f786de0..04b643e 100644
53810--- a/drivers/target/target_core_transport.c
53811+++ b/drivers/target/target_core_transport.c
53812@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53813 * Used to determine when ORDERED commands should go from
53814 * Dormant to Active status.
53815 */
53816- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53817+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53818 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53819 cmd->se_ordered_id, cmd->sam_task_attr,
53820 dev->transport->name);
53821diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
53822index 031018e..90981a1 100644
53823--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
53824+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
53825@@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
53826 platform_set_drvdata(pdev, priv);
53827
53828 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
53829- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53830- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53831+ pax_open_kernel();
53832+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53833+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53834+ pax_close_kernel();
53835 }
53836 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
53837 priv, &int3400_thermal_ops,
53838diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53839index 668fb1b..2737bbe 100644
53840--- a/drivers/thermal/of-thermal.c
53841+++ b/drivers/thermal/of-thermal.c
53842@@ -31,6 +31,7 @@
53843 #include <linux/export.h>
53844 #include <linux/string.h>
53845 #include <linux/thermal.h>
53846+#include <linux/mm.h>
53847
53848 #include "thermal_core.h"
53849
53850@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53851 tz->ops = ops;
53852 tz->sensor_data = data;
53853
53854- tzd->ops->get_temp = of_thermal_get_temp;
53855- tzd->ops->get_trend = of_thermal_get_trend;
53856- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53857+ pax_open_kernel();
53858+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53859+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53860+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53861+ pax_close_kernel();
53862 mutex_unlock(&tzd->lock);
53863
53864 return tzd;
53865@@ -544,9 +547,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53866 return;
53867
53868 mutex_lock(&tzd->lock);
53869- tzd->ops->get_temp = NULL;
53870- tzd->ops->get_trend = NULL;
53871- tzd->ops->set_emul_temp = NULL;
53872+ pax_open_kernel();
53873+ *(void **)&tzd->ops->get_temp = NULL;
53874+ *(void **)&tzd->ops->get_trend = NULL;
53875+ *(void **)&tzd->ops->set_emul_temp = NULL;
53876+ pax_close_kernel();
53877
53878 tz->ops = NULL;
53879 tz->sensor_data = NULL;
53880diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
53881index 9ea3d9d..53e8792 100644
53882--- a/drivers/thermal/x86_pkg_temp_thermal.c
53883+++ b/drivers/thermal/x86_pkg_temp_thermal.c
53884@@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
53885 return NOTIFY_OK;
53886 }
53887
53888-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
53889+static struct notifier_block pkg_temp_thermal_notifier __refconst = {
53890 .notifier_call = pkg_temp_thermal_cpu_callback,
53891 };
53892
53893diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53894index fd66f57..48e6376 100644
53895--- a/drivers/tty/cyclades.c
53896+++ b/drivers/tty/cyclades.c
53897@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53898 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53899 info->port.count);
53900 #endif
53901- info->port.count++;
53902+ atomic_inc(&info->port.count);
53903 #ifdef CY_DEBUG_COUNT
53904 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53905- current->pid, info->port.count);
53906+ current->pid, atomic_read(&info->port.count));
53907 #endif
53908
53909 /*
53910@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
53911 for (j = 0; j < cy_card[i].nports; j++) {
53912 info = &cy_card[i].ports[j];
53913
53914- if (info->port.count) {
53915+ if (atomic_read(&info->port.count)) {
53916 /* XXX is the ldisc num worth this? */
53917 struct tty_struct *tty;
53918 struct tty_ldisc *ld;
53919diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
53920index 4fcec1d..5a036f7 100644
53921--- a/drivers/tty/hvc/hvc_console.c
53922+++ b/drivers/tty/hvc/hvc_console.c
53923@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
53924
53925 spin_lock_irqsave(&hp->port.lock, flags);
53926 /* Check and then increment for fast path open. */
53927- if (hp->port.count++ > 0) {
53928+ if (atomic_inc_return(&hp->port.count) > 1) {
53929 spin_unlock_irqrestore(&hp->port.lock, flags);
53930 hvc_kick();
53931 return 0;
53932@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53933
53934 spin_lock_irqsave(&hp->port.lock, flags);
53935
53936- if (--hp->port.count == 0) {
53937+ if (atomic_dec_return(&hp->port.count) == 0) {
53938 spin_unlock_irqrestore(&hp->port.lock, flags);
53939 /* We are done with the tty pointer now. */
53940 tty_port_tty_set(&hp->port, NULL);
53941@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53942 */
53943 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
53944 } else {
53945- if (hp->port.count < 0)
53946+ if (atomic_read(&hp->port.count) < 0)
53947 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
53948- hp->vtermno, hp->port.count);
53949+ hp->vtermno, atomic_read(&hp->port.count));
53950 spin_unlock_irqrestore(&hp->port.lock, flags);
53951 }
53952 }
53953@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
53954 * open->hangup case this can be called after the final close so prevent
53955 * that from happening for now.
53956 */
53957- if (hp->port.count <= 0) {
53958+ if (atomic_read(&hp->port.count) <= 0) {
53959 spin_unlock_irqrestore(&hp->port.lock, flags);
53960 return;
53961 }
53962
53963- hp->port.count = 0;
53964+ atomic_set(&hp->port.count, 0);
53965 spin_unlock_irqrestore(&hp->port.lock, flags);
53966 tty_port_tty_set(&hp->port, NULL);
53967
53968@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
53969 return -EPIPE;
53970
53971 /* FIXME what's this (unprotected) check for? */
53972- if (hp->port.count <= 0)
53973+ if (atomic_read(&hp->port.count) <= 0)
53974 return -EIO;
53975
53976 spin_lock_irqsave(&hp->lock, flags);
53977diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
53978index 81ff7e1..dfb7b71 100644
53979--- a/drivers/tty/hvc/hvcs.c
53980+++ b/drivers/tty/hvc/hvcs.c
53981@@ -83,6 +83,7 @@
53982 #include <asm/hvcserver.h>
53983 #include <asm/uaccess.h>
53984 #include <asm/vio.h>
53985+#include <asm/local.h>
53986
53987 /*
53988 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
53989@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
53990
53991 spin_lock_irqsave(&hvcsd->lock, flags);
53992
53993- if (hvcsd->port.count > 0) {
53994+ if (atomic_read(&hvcsd->port.count) > 0) {
53995 spin_unlock_irqrestore(&hvcsd->lock, flags);
53996 printk(KERN_INFO "HVCS: vterm state unchanged. "
53997 "The hvcs device node is still in use.\n");
53998@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
53999 }
54000 }
54001
54002- hvcsd->port.count = 0;
54003+ atomic_set(&hvcsd->port.count, 0);
54004 hvcsd->port.tty = tty;
54005 tty->driver_data = hvcsd;
54006
54007@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
54008 unsigned long flags;
54009
54010 spin_lock_irqsave(&hvcsd->lock, flags);
54011- hvcsd->port.count++;
54012+ atomic_inc(&hvcsd->port.count);
54013 hvcsd->todo_mask |= HVCS_SCHED_READ;
54014 spin_unlock_irqrestore(&hvcsd->lock, flags);
54015
54016@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54017 hvcsd = tty->driver_data;
54018
54019 spin_lock_irqsave(&hvcsd->lock, flags);
54020- if (--hvcsd->port.count == 0) {
54021+ if (atomic_dec_and_test(&hvcsd->port.count)) {
54022
54023 vio_disable_interrupts(hvcsd->vdev);
54024
54025@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54026
54027 free_irq(irq, hvcsd);
54028 return;
54029- } else if (hvcsd->port.count < 0) {
54030+ } else if (atomic_read(&hvcsd->port.count) < 0) {
54031 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
54032 " is missmanaged.\n",
54033- hvcsd->vdev->unit_address, hvcsd->port.count);
54034+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
54035 }
54036
54037 spin_unlock_irqrestore(&hvcsd->lock, flags);
54038@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54039
54040 spin_lock_irqsave(&hvcsd->lock, flags);
54041 /* Preserve this so that we know how many kref refs to put */
54042- temp_open_count = hvcsd->port.count;
54043+ temp_open_count = atomic_read(&hvcsd->port.count);
54044
54045 /*
54046 * Don't kref put inside the spinlock because the destruction
54047@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54048 tty->driver_data = NULL;
54049 hvcsd->port.tty = NULL;
54050
54051- hvcsd->port.count = 0;
54052+ atomic_set(&hvcsd->port.count, 0);
54053
54054 /* This will drop any buffered data on the floor which is OK in a hangup
54055 * scenario. */
54056@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
54057 * the middle of a write operation? This is a crummy place to do this
54058 * but we want to keep it all in the spinlock.
54059 */
54060- if (hvcsd->port.count <= 0) {
54061+ if (atomic_read(&hvcsd->port.count) <= 0) {
54062 spin_unlock_irqrestore(&hvcsd->lock, flags);
54063 return -ENODEV;
54064 }
54065@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
54066 {
54067 struct hvcs_struct *hvcsd = tty->driver_data;
54068
54069- if (!hvcsd || hvcsd->port.count <= 0)
54070+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
54071 return 0;
54072
54073 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
54074diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
54075index 4190199..06d5bfa 100644
54076--- a/drivers/tty/hvc/hvsi.c
54077+++ b/drivers/tty/hvc/hvsi.c
54078@@ -85,7 +85,7 @@ struct hvsi_struct {
54079 int n_outbuf;
54080 uint32_t vtermno;
54081 uint32_t virq;
54082- atomic_t seqno; /* HVSI packet sequence number */
54083+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
54084 uint16_t mctrl;
54085 uint8_t state; /* HVSI protocol state */
54086 uint8_t flags;
54087@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
54088
54089 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
54090 packet.hdr.len = sizeof(struct hvsi_query_response);
54091- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54092+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54093 packet.verb = VSV_SEND_VERSION_NUMBER;
54094 packet.u.version = HVSI_VERSION;
54095 packet.query_seqno = query_seqno+1;
54096@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
54097
54098 packet.hdr.type = VS_QUERY_PACKET_HEADER;
54099 packet.hdr.len = sizeof(struct hvsi_query);
54100- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54101+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54102 packet.verb = verb;
54103
54104 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
54105@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
54106 int wrote;
54107
54108 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
54109- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54110+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54111 packet.hdr.len = sizeof(struct hvsi_control);
54112 packet.verb = VSV_SET_MODEM_CTL;
54113 packet.mask = HVSI_TSDTR;
54114@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
54115 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
54116
54117 packet.hdr.type = VS_DATA_PACKET_HEADER;
54118- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54119+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54120 packet.hdr.len = count + sizeof(struct hvsi_header);
54121 memcpy(&packet.data, buf, count);
54122
54123@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
54124 struct hvsi_control packet __ALIGNED__;
54125
54126 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
54127- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54128+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54129 packet.hdr.len = 6;
54130 packet.verb = VSV_CLOSE_PROTOCOL;
54131
54132@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
54133
54134 tty_port_tty_set(&hp->port, tty);
54135 spin_lock_irqsave(&hp->lock, flags);
54136- hp->port.count++;
54137+ atomic_inc(&hp->port.count);
54138 atomic_set(&hp->seqno, 0);
54139 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
54140 spin_unlock_irqrestore(&hp->lock, flags);
54141@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54142
54143 spin_lock_irqsave(&hp->lock, flags);
54144
54145- if (--hp->port.count == 0) {
54146+ if (atomic_dec_return(&hp->port.count) == 0) {
54147 tty_port_tty_set(&hp->port, NULL);
54148 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
54149
54150@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54151
54152 spin_lock_irqsave(&hp->lock, flags);
54153 }
54154- } else if (hp->port.count < 0)
54155+ } else if (atomic_read(&hp->port.count) < 0)
54156 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
54157- hp - hvsi_ports, hp->port.count);
54158+ hp - hvsi_ports, atomic_read(&hp->port.count));
54159
54160 spin_unlock_irqrestore(&hp->lock, flags);
54161 }
54162@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
54163 tty_port_tty_set(&hp->port, NULL);
54164
54165 spin_lock_irqsave(&hp->lock, flags);
54166- hp->port.count = 0;
54167+ atomic_set(&hp->port.count, 0);
54168 hp->n_outbuf = 0;
54169 spin_unlock_irqrestore(&hp->lock, flags);
54170 }
54171diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
54172index a270f04..7c77b5d 100644
54173--- a/drivers/tty/hvc/hvsi_lib.c
54174+++ b/drivers/tty/hvc/hvsi_lib.c
54175@@ -8,7 +8,7 @@
54176
54177 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
54178 {
54179- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
54180+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
54181
54182 /* Assumes that always succeeds, works in practice */
54183 return pv->put_chars(pv->termno, (char *)packet, packet->len);
54184@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
54185
54186 /* Reset state */
54187 pv->established = 0;
54188- atomic_set(&pv->seqno, 0);
54189+ atomic_set_unchecked(&pv->seqno, 0);
54190
54191 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
54192
54193diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
54194index 345cebb..d5a1e9e 100644
54195--- a/drivers/tty/ipwireless/tty.c
54196+++ b/drivers/tty/ipwireless/tty.c
54197@@ -28,6 +28,7 @@
54198 #include <linux/tty_driver.h>
54199 #include <linux/tty_flip.h>
54200 #include <linux/uaccess.h>
54201+#include <asm/local.h>
54202
54203 #include "tty.h"
54204 #include "network.h"
54205@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54206 return -ENODEV;
54207
54208 mutex_lock(&tty->ipw_tty_mutex);
54209- if (tty->port.count == 0)
54210+ if (atomic_read(&tty->port.count) == 0)
54211 tty->tx_bytes_queued = 0;
54212
54213- tty->port.count++;
54214+ atomic_inc(&tty->port.count);
54215
54216 tty->port.tty = linux_tty;
54217 linux_tty->driver_data = tty;
54218@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54219
54220 static void do_ipw_close(struct ipw_tty *tty)
54221 {
54222- tty->port.count--;
54223-
54224- if (tty->port.count == 0) {
54225+ if (atomic_dec_return(&tty->port.count) == 0) {
54226 struct tty_struct *linux_tty = tty->port.tty;
54227
54228 if (linux_tty != NULL) {
54229@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
54230 return;
54231
54232 mutex_lock(&tty->ipw_tty_mutex);
54233- if (tty->port.count == 0) {
54234+ if (atomic_read(&tty->port.count) == 0) {
54235 mutex_unlock(&tty->ipw_tty_mutex);
54236 return;
54237 }
54238@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
54239
54240 mutex_lock(&tty->ipw_tty_mutex);
54241
54242- if (!tty->port.count) {
54243+ if (!atomic_read(&tty->port.count)) {
54244 mutex_unlock(&tty->ipw_tty_mutex);
54245 return;
54246 }
54247@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
54248 return -ENODEV;
54249
54250 mutex_lock(&tty->ipw_tty_mutex);
54251- if (!tty->port.count) {
54252+ if (!atomic_read(&tty->port.count)) {
54253 mutex_unlock(&tty->ipw_tty_mutex);
54254 return -EINVAL;
54255 }
54256@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
54257 if (!tty)
54258 return -ENODEV;
54259
54260- if (!tty->port.count)
54261+ if (!atomic_read(&tty->port.count))
54262 return -EINVAL;
54263
54264 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
54265@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
54266 if (!tty)
54267 return 0;
54268
54269- if (!tty->port.count)
54270+ if (!atomic_read(&tty->port.count))
54271 return 0;
54272
54273 return tty->tx_bytes_queued;
54274@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
54275 if (!tty)
54276 return -ENODEV;
54277
54278- if (!tty->port.count)
54279+ if (!atomic_read(&tty->port.count))
54280 return -EINVAL;
54281
54282 return get_control_lines(tty);
54283@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54284 if (!tty)
54285 return -ENODEV;
54286
54287- if (!tty->port.count)
54288+ if (!atomic_read(&tty->port.count))
54289 return -EINVAL;
54290
54291 return set_control_lines(tty, set, clear);
54292@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54293 if (!tty)
54294 return -ENODEV;
54295
54296- if (!tty->port.count)
54297+ if (!atomic_read(&tty->port.count))
54298 return -EINVAL;
54299
54300 /* FIXME: Exactly how is the tty object locked here .. */
54301@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54302 * are gone */
54303 mutex_lock(&ttyj->ipw_tty_mutex);
54304 }
54305- while (ttyj->port.count)
54306+ while (atomic_read(&ttyj->port.count))
54307 do_ipw_close(ttyj);
54308 ipwireless_disassociate_network_ttys(network,
54309 ttyj->channel_idx);
54310diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54311index 14c54e0..1efd4f2 100644
54312--- a/drivers/tty/moxa.c
54313+++ b/drivers/tty/moxa.c
54314@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54315 }
54316
54317 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54318- ch->port.count++;
54319+ atomic_inc(&ch->port.count);
54320 tty->driver_data = ch;
54321 tty_port_tty_set(&ch->port, tty);
54322 mutex_lock(&ch->port.mutex);
54323diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54324index c434376..114ce13 100644
54325--- a/drivers/tty/n_gsm.c
54326+++ b/drivers/tty/n_gsm.c
54327@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54328 spin_lock_init(&dlci->lock);
54329 mutex_init(&dlci->mutex);
54330 dlci->fifo = &dlci->_fifo;
54331- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54332+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54333 kfree(dlci);
54334 return NULL;
54335 }
54336@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54337 struct gsm_dlci *dlci = tty->driver_data;
54338 struct tty_port *port = &dlci->port;
54339
54340- port->count++;
54341+ atomic_inc(&port->count);
54342 tty_port_tty_set(port, tty);
54343
54344 dlci->modem_rx = 0;
54345diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54346index cf6e0f2..4283167 100644
54347--- a/drivers/tty/n_tty.c
54348+++ b/drivers/tty/n_tty.c
54349@@ -116,7 +116,7 @@ struct n_tty_data {
54350 int minimum_to_wake;
54351
54352 /* consumer-published */
54353- size_t read_tail;
54354+ size_t read_tail __intentional_overflow(-1);
54355 size_t line_start;
54356
54357 /* protected by output lock */
54358@@ -2547,6 +2547,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54359 {
54360 *ops = tty_ldisc_N_TTY;
54361 ops->owner = NULL;
54362- ops->refcount = ops->flags = 0;
54363+ atomic_set(&ops->refcount, 0);
54364+ ops->flags = 0;
54365 }
54366 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54367diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54368index e72ee62..d977ad9 100644
54369--- a/drivers/tty/pty.c
54370+++ b/drivers/tty/pty.c
54371@@ -848,8 +848,10 @@ static void __init unix98_pty_init(void)
54372 panic("Couldn't register Unix98 pts driver");
54373
54374 /* Now create the /dev/ptmx special device */
54375+ pax_open_kernel();
54376 tty_default_fops(&ptmx_fops);
54377- ptmx_fops.open = ptmx_open;
54378+ *(void **)&ptmx_fops.open = ptmx_open;
54379+ pax_close_kernel();
54380
54381 cdev_init(&ptmx_cdev, &ptmx_fops);
54382 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
54383diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54384index c8dd8dc..dca6cfd 100644
54385--- a/drivers/tty/rocket.c
54386+++ b/drivers/tty/rocket.c
54387@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54388 tty->driver_data = info;
54389 tty_port_tty_set(port, tty);
54390
54391- if (port->count++ == 0) {
54392+ if (atomic_inc_return(&port->count) == 1) {
54393 atomic_inc(&rp_num_ports_open);
54394
54395 #ifdef ROCKET_DEBUG_OPEN
54396@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54397 #endif
54398 }
54399 #ifdef ROCKET_DEBUG_OPEN
54400- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54401+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
54402 #endif
54403
54404 /*
54405@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54406 spin_unlock_irqrestore(&info->port.lock, flags);
54407 return;
54408 }
54409- if (info->port.count)
54410+ if (atomic_read(&info->port.count))
54411 atomic_dec(&rp_num_ports_open);
54412 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54413 spin_unlock_irqrestore(&info->port.lock, flags);
54414diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54415index aa28209..e08fb85 100644
54416--- a/drivers/tty/serial/ioc4_serial.c
54417+++ b/drivers/tty/serial/ioc4_serial.c
54418@@ -437,7 +437,7 @@ struct ioc4_soft {
54419 } is_intr_info[MAX_IOC4_INTR_ENTS];
54420
54421 /* Number of entries active in the above array */
54422- atomic_t is_num_intrs;
54423+ atomic_unchecked_t is_num_intrs;
54424 } is_intr_type[IOC4_NUM_INTR_TYPES];
54425
54426 /* is_ir_lock must be held while
54427@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54428 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54429 || (type == IOC4_OTHER_INTR_TYPE)));
54430
54431- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54432+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54433 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54434
54435 /* Save off the lower level interrupt handler */
54436@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54437
54438 soft = arg;
54439 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54440- num_intrs = (int)atomic_read(
54441+ num_intrs = (int)atomic_read_unchecked(
54442 &soft->is_intr_type[intr_type].is_num_intrs);
54443
54444 this_mir = this_ir = pending_intrs(soft, intr_type);
54445diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54446index 129dc5b..1da5bb8 100644
54447--- a/drivers/tty/serial/kgdb_nmi.c
54448+++ b/drivers/tty/serial/kgdb_nmi.c
54449@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54450 * I/O utilities that messages sent to the console will automatically
54451 * be displayed on the dbg_io.
54452 */
54453- dbg_io_ops->is_console = true;
54454+ pax_open_kernel();
54455+ *(int *)&dbg_io_ops->is_console = true;
54456+ pax_close_kernel();
54457
54458 return 0;
54459 }
54460diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54461index a260cde..6b2b5ce 100644
54462--- a/drivers/tty/serial/kgdboc.c
54463+++ b/drivers/tty/serial/kgdboc.c
54464@@ -24,8 +24,9 @@
54465 #define MAX_CONFIG_LEN 40
54466
54467 static struct kgdb_io kgdboc_io_ops;
54468+static struct kgdb_io kgdboc_io_ops_console;
54469
54470-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54471+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54472 static int configured = -1;
54473
54474 static char config[MAX_CONFIG_LEN];
54475@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54476 kgdboc_unregister_kbd();
54477 if (configured == 1)
54478 kgdb_unregister_io_module(&kgdboc_io_ops);
54479+ else if (configured == 2)
54480+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54481 }
54482
54483 static int configure_kgdboc(void)
54484@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54485 int err;
54486 char *cptr = config;
54487 struct console *cons;
54488+ int is_console = 0;
54489
54490 err = kgdboc_option_setup(config);
54491 if (err || !strlen(config) || isspace(config[0]))
54492 goto noconfig;
54493
54494 err = -ENODEV;
54495- kgdboc_io_ops.is_console = 0;
54496 kgdb_tty_driver = NULL;
54497
54498 kgdboc_use_kms = 0;
54499@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54500 int idx;
54501 if (cons->device && cons->device(cons, &idx) == p &&
54502 idx == tty_line) {
54503- kgdboc_io_ops.is_console = 1;
54504+ is_console = 1;
54505 break;
54506 }
54507 cons = cons->next;
54508@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54509 kgdb_tty_line = tty_line;
54510
54511 do_register:
54512- err = kgdb_register_io_module(&kgdboc_io_ops);
54513+ if (is_console) {
54514+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54515+ configured = 2;
54516+ } else {
54517+ err = kgdb_register_io_module(&kgdboc_io_ops);
54518+ configured = 1;
54519+ }
54520 if (err)
54521 goto noconfig;
54522
54523@@ -205,8 +214,6 @@ do_register:
54524 if (err)
54525 goto nmi_con_failed;
54526
54527- configured = 1;
54528-
54529 return 0;
54530
54531 nmi_con_failed:
54532@@ -223,7 +230,7 @@ noconfig:
54533 static int __init init_kgdboc(void)
54534 {
54535 /* Already configured? */
54536- if (configured == 1)
54537+ if (configured >= 1)
54538 return 0;
54539
54540 return configure_kgdboc();
54541@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54542 if (config[len - 1] == '\n')
54543 config[len - 1] = '\0';
54544
54545- if (configured == 1)
54546+ if (configured >= 1)
54547 cleanup_kgdboc();
54548
54549 /* Go and configure with the new params. */
54550@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54551 .post_exception = kgdboc_post_exp_handler,
54552 };
54553
54554+static struct kgdb_io kgdboc_io_ops_console = {
54555+ .name = "kgdboc",
54556+ .read_char = kgdboc_get_char,
54557+ .write_char = kgdboc_put_char,
54558+ .pre_exception = kgdboc_pre_exp_handler,
54559+ .post_exception = kgdboc_post_exp_handler,
54560+ .is_console = 1
54561+};
54562+
54563 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54564 /* This is only available if kgdboc is a built in for early debugging */
54565 static int __init kgdboc_early_init(char *opt)
54566diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54567index b73889c..9f74f0a 100644
54568--- a/drivers/tty/serial/msm_serial.c
54569+++ b/drivers/tty/serial/msm_serial.c
54570@@ -1012,7 +1012,7 @@ static struct uart_driver msm_uart_driver = {
54571 .cons = MSM_CONSOLE,
54572 };
54573
54574-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54575+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54576
54577 static const struct of_device_id msm_uartdm_table[] = {
54578 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54579@@ -1036,7 +1036,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54580 line = pdev->id;
54581
54582 if (line < 0)
54583- line = atomic_inc_return(&msm_uart_next_id) - 1;
54584+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54585
54586 if (unlikely(line < 0 || line >= UART_NR))
54587 return -ENXIO;
54588diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54589index cf08876..711e0bf 100644
54590--- a/drivers/tty/serial/samsung.c
54591+++ b/drivers/tty/serial/samsung.c
54592@@ -987,11 +987,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54593 ourport->tx_in_progress = 0;
54594 }
54595
54596+static int s3c64xx_serial_startup(struct uart_port *port);
54597 static int s3c24xx_serial_startup(struct uart_port *port)
54598 {
54599 struct s3c24xx_uart_port *ourport = to_ourport(port);
54600 int ret;
54601
54602+ /* Startup sequence is different for s3c64xx and higher SoC's */
54603+ if (s3c24xx_serial_has_interrupt_mask(port))
54604+ return s3c64xx_serial_startup(port);
54605+
54606 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54607 port, (unsigned long long)port->mapbase, port->membase);
54608
54609@@ -1697,10 +1702,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54610 /* setup info for port */
54611 port->dev = &platdev->dev;
54612
54613- /* Startup sequence is different for s3c64xx and higher SoC's */
54614- if (s3c24xx_serial_has_interrupt_mask(port))
54615- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54616-
54617 port->uartclk = 1;
54618
54619 if (cfg->uart_flags & UPF_CONS_FLOW) {
54620diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54621index 6a1055a..5ca9ad9 100644
54622--- a/drivers/tty/serial/serial_core.c
54623+++ b/drivers/tty/serial/serial_core.c
54624@@ -1377,7 +1377,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54625 state = drv->state + tty->index;
54626 port = &state->port;
54627 spin_lock_irq(&port->lock);
54628- --port->count;
54629+ atomic_dec(&port->count);
54630 spin_unlock_irq(&port->lock);
54631 return;
54632 }
54633@@ -1387,7 +1387,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54634
54635 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54636
54637- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54638+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54639 return;
54640
54641 /*
54642@@ -1511,7 +1511,7 @@ static void uart_hangup(struct tty_struct *tty)
54643 uart_flush_buffer(tty);
54644 uart_shutdown(tty, state);
54645 spin_lock_irqsave(&port->lock, flags);
54646- port->count = 0;
54647+ atomic_set(&port->count, 0);
54648 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54649 spin_unlock_irqrestore(&port->lock, flags);
54650 tty_port_tty_set(port, NULL);
54651@@ -1598,7 +1598,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54652 pr_debug("uart_open(%d) called\n", line);
54653
54654 spin_lock_irq(&port->lock);
54655- ++port->count;
54656+ atomic_inc(&port->count);
54657 spin_unlock_irq(&port->lock);
54658
54659 /*
54660diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54661index b799170..87dafd5 100644
54662--- a/drivers/tty/synclink.c
54663+++ b/drivers/tty/synclink.c
54664@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54665
54666 if (debug_level >= DEBUG_LEVEL_INFO)
54667 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54668- __FILE__,__LINE__, info->device_name, info->port.count);
54669+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54670
54671 if (tty_port_close_start(&info->port, tty, filp) == 0)
54672 goto cleanup;
54673@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54674 cleanup:
54675 if (debug_level >= DEBUG_LEVEL_INFO)
54676 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54677- tty->driver->name, info->port.count);
54678+ tty->driver->name, atomic_read(&info->port.count));
54679
54680 } /* end of mgsl_close() */
54681
54682@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54683
54684 mgsl_flush_buffer(tty);
54685 shutdown(info);
54686-
54687- info->port.count = 0;
54688+
54689+ atomic_set(&info->port.count, 0);
54690 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54691 info->port.tty = NULL;
54692
54693@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54694
54695 if (debug_level >= DEBUG_LEVEL_INFO)
54696 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54697- __FILE__,__LINE__, tty->driver->name, port->count );
54698+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54699
54700 spin_lock_irqsave(&info->irq_spinlock, flags);
54701- port->count--;
54702+ atomic_dec(&port->count);
54703 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54704 port->blocked_open++;
54705
54706@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54707
54708 if (debug_level >= DEBUG_LEVEL_INFO)
54709 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54710- __FILE__,__LINE__, tty->driver->name, port->count );
54711+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54712
54713 tty_unlock(tty);
54714 schedule();
54715@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54716
54717 /* FIXME: Racy on hangup during close wait */
54718 if (!tty_hung_up_p(filp))
54719- port->count++;
54720+ atomic_inc(&port->count);
54721 port->blocked_open--;
54722
54723 if (debug_level >= DEBUG_LEVEL_INFO)
54724 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54725- __FILE__,__LINE__, tty->driver->name, port->count );
54726+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54727
54728 if (!retval)
54729 port->flags |= ASYNC_NORMAL_ACTIVE;
54730@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54731
54732 if (debug_level >= DEBUG_LEVEL_INFO)
54733 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54734- __FILE__,__LINE__,tty->driver->name, info->port.count);
54735+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54736
54737 /* If port is closing, signal caller to try again */
54738 if (info->port.flags & ASYNC_CLOSING){
54739@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54740 spin_unlock_irqrestore(&info->netlock, flags);
54741 goto cleanup;
54742 }
54743- info->port.count++;
54744+ atomic_inc(&info->port.count);
54745 spin_unlock_irqrestore(&info->netlock, flags);
54746
54747- if (info->port.count == 1) {
54748+ if (atomic_read(&info->port.count) == 1) {
54749 /* 1st open on this device, init hardware */
54750 retval = startup(info);
54751 if (retval < 0)
54752@@ -3442,8 +3442,8 @@ cleanup:
54753 if (retval) {
54754 if (tty->count == 1)
54755 info->port.tty = NULL; /* tty layer will release tty struct */
54756- if(info->port.count)
54757- info->port.count--;
54758+ if (atomic_read(&info->port.count))
54759+ atomic_dec(&info->port.count);
54760 }
54761
54762 return retval;
54763@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54764 unsigned short new_crctype;
54765
54766 /* return error if TTY interface open */
54767- if (info->port.count)
54768+ if (atomic_read(&info->port.count))
54769 return -EBUSY;
54770
54771 switch (encoding)
54772@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
54773
54774 /* arbitrate between network and tty opens */
54775 spin_lock_irqsave(&info->netlock, flags);
54776- if (info->port.count != 0 || info->netcount != 0) {
54777+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54778 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54779 spin_unlock_irqrestore(&info->netlock, flags);
54780 return -EBUSY;
54781@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54782 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54783
54784 /* return error if TTY interface open */
54785- if (info->port.count)
54786+ if (atomic_read(&info->port.count))
54787 return -EBUSY;
54788
54789 if (cmd != SIOCWANDEV)
54790diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54791index 0e8c39b..e0cb171 100644
54792--- a/drivers/tty/synclink_gt.c
54793+++ b/drivers/tty/synclink_gt.c
54794@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54795 tty->driver_data = info;
54796 info->port.tty = tty;
54797
54798- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54799+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54800
54801 /* If port is closing, signal caller to try again */
54802 if (info->port.flags & ASYNC_CLOSING){
54803@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54804 mutex_unlock(&info->port.mutex);
54805 goto cleanup;
54806 }
54807- info->port.count++;
54808+ atomic_inc(&info->port.count);
54809 spin_unlock_irqrestore(&info->netlock, flags);
54810
54811- if (info->port.count == 1) {
54812+ if (atomic_read(&info->port.count) == 1) {
54813 /* 1st open on this device, init hardware */
54814 retval = startup(info);
54815 if (retval < 0) {
54816@@ -715,8 +715,8 @@ cleanup:
54817 if (retval) {
54818 if (tty->count == 1)
54819 info->port.tty = NULL; /* tty layer will release tty struct */
54820- if(info->port.count)
54821- info->port.count--;
54822+ if(atomic_read(&info->port.count))
54823+ atomic_dec(&info->port.count);
54824 }
54825
54826 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54827@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54828
54829 if (sanity_check(info, tty->name, "close"))
54830 return;
54831- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54832+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54833
54834 if (tty_port_close_start(&info->port, tty, filp) == 0)
54835 goto cleanup;
54836@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54837 tty_port_close_end(&info->port, tty);
54838 info->port.tty = NULL;
54839 cleanup:
54840- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54841+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54842 }
54843
54844 static void hangup(struct tty_struct *tty)
54845@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54846 shutdown(info);
54847
54848 spin_lock_irqsave(&info->port.lock, flags);
54849- info->port.count = 0;
54850+ atomic_set(&info->port.count, 0);
54851 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54852 info->port.tty = NULL;
54853 spin_unlock_irqrestore(&info->port.lock, flags);
54854@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54855 unsigned short new_crctype;
54856
54857 /* return error if TTY interface open */
54858- if (info->port.count)
54859+ if (atomic_read(&info->port.count))
54860 return -EBUSY;
54861
54862 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54863@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54864
54865 /* arbitrate between network and tty opens */
54866 spin_lock_irqsave(&info->netlock, flags);
54867- if (info->port.count != 0 || info->netcount != 0) {
54868+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54869 DBGINFO(("%s hdlc_open busy\n", dev->name));
54870 spin_unlock_irqrestore(&info->netlock, flags);
54871 return -EBUSY;
54872@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54873 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54874
54875 /* return error if TTY interface open */
54876- if (info->port.count)
54877+ if (atomic_read(&info->port.count))
54878 return -EBUSY;
54879
54880 if (cmd != SIOCWANDEV)
54881@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54882 if (port == NULL)
54883 continue;
54884 spin_lock(&port->lock);
54885- if ((port->port.count || port->netcount) &&
54886+ if ((atomic_read(&port->port.count) || port->netcount) &&
54887 port->pending_bh && !port->bh_running &&
54888 !port->bh_requested) {
54889 DBGISR(("%s bh queued\n", port->device_name));
54890@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54891 add_wait_queue(&port->open_wait, &wait);
54892
54893 spin_lock_irqsave(&info->lock, flags);
54894- port->count--;
54895+ atomic_dec(&port->count);
54896 spin_unlock_irqrestore(&info->lock, flags);
54897 port->blocked_open++;
54898
54899@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54900 remove_wait_queue(&port->open_wait, &wait);
54901
54902 if (!tty_hung_up_p(filp))
54903- port->count++;
54904+ atomic_inc(&port->count);
54905 port->blocked_open--;
54906
54907 if (!retval)
54908diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
54909index c3f9091..abe4601 100644
54910--- a/drivers/tty/synclinkmp.c
54911+++ b/drivers/tty/synclinkmp.c
54912@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54913
54914 if (debug_level >= DEBUG_LEVEL_INFO)
54915 printk("%s(%d):%s open(), old ref count = %d\n",
54916- __FILE__,__LINE__,tty->driver->name, info->port.count);
54917+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54918
54919 /* If port is closing, signal caller to try again */
54920 if (info->port.flags & ASYNC_CLOSING){
54921@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54922 spin_unlock_irqrestore(&info->netlock, flags);
54923 goto cleanup;
54924 }
54925- info->port.count++;
54926+ atomic_inc(&info->port.count);
54927 spin_unlock_irqrestore(&info->netlock, flags);
54928
54929- if (info->port.count == 1) {
54930+ if (atomic_read(&info->port.count) == 1) {
54931 /* 1st open on this device, init hardware */
54932 retval = startup(info);
54933 if (retval < 0)
54934@@ -796,8 +796,8 @@ cleanup:
54935 if (retval) {
54936 if (tty->count == 1)
54937 info->port.tty = NULL; /* tty layer will release tty struct */
54938- if(info->port.count)
54939- info->port.count--;
54940+ if(atomic_read(&info->port.count))
54941+ atomic_dec(&info->port.count);
54942 }
54943
54944 return retval;
54945@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54946
54947 if (debug_level >= DEBUG_LEVEL_INFO)
54948 printk("%s(%d):%s close() entry, count=%d\n",
54949- __FILE__,__LINE__, info->device_name, info->port.count);
54950+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54951
54952 if (tty_port_close_start(&info->port, tty, filp) == 0)
54953 goto cleanup;
54954@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54955 cleanup:
54956 if (debug_level >= DEBUG_LEVEL_INFO)
54957 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
54958- tty->driver->name, info->port.count);
54959+ tty->driver->name, atomic_read(&info->port.count));
54960 }
54961
54962 /* Called by tty_hangup() when a hangup is signaled.
54963@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
54964 shutdown(info);
54965
54966 spin_lock_irqsave(&info->port.lock, flags);
54967- info->port.count = 0;
54968+ atomic_set(&info->port.count, 0);
54969 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54970 info->port.tty = NULL;
54971 spin_unlock_irqrestore(&info->port.lock, flags);
54972@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54973 unsigned short new_crctype;
54974
54975 /* return error if TTY interface open */
54976- if (info->port.count)
54977+ if (atomic_read(&info->port.count))
54978 return -EBUSY;
54979
54980 switch (encoding)
54981@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
54982
54983 /* arbitrate between network and tty opens */
54984 spin_lock_irqsave(&info->netlock, flags);
54985- if (info->port.count != 0 || info->netcount != 0) {
54986+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54987 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54988 spin_unlock_irqrestore(&info->netlock, flags);
54989 return -EBUSY;
54990@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54991 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54992
54993 /* return error if TTY interface open */
54994- if (info->port.count)
54995+ if (atomic_read(&info->port.count))
54996 return -EBUSY;
54997
54998 if (cmd != SIOCWANDEV)
54999@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
55000 * do not request bottom half processing if the
55001 * device is not open in a normal mode.
55002 */
55003- if ( port && (port->port.count || port->netcount) &&
55004+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
55005 port->pending_bh && !port->bh_running &&
55006 !port->bh_requested ) {
55007 if ( debug_level >= DEBUG_LEVEL_ISR )
55008@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55009
55010 if (debug_level >= DEBUG_LEVEL_INFO)
55011 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
55012- __FILE__,__LINE__, tty->driver->name, port->count );
55013+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55014
55015 spin_lock_irqsave(&info->lock, flags);
55016- port->count--;
55017+ atomic_dec(&port->count);
55018 spin_unlock_irqrestore(&info->lock, flags);
55019 port->blocked_open++;
55020
55021@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55022
55023 if (debug_level >= DEBUG_LEVEL_INFO)
55024 printk("%s(%d):%s block_til_ready() count=%d\n",
55025- __FILE__,__LINE__, tty->driver->name, port->count );
55026+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55027
55028 tty_unlock(tty);
55029 schedule();
55030@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55031 set_current_state(TASK_RUNNING);
55032 remove_wait_queue(&port->open_wait, &wait);
55033 if (!tty_hung_up_p(filp))
55034- port->count++;
55035+ atomic_inc(&port->count);
55036 port->blocked_open--;
55037
55038 if (debug_level >= DEBUG_LEVEL_INFO)
55039 printk("%s(%d):%s block_til_ready() after, count=%d\n",
55040- __FILE__,__LINE__, tty->driver->name, port->count );
55041+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55042
55043 if (!retval)
55044 port->flags |= ASYNC_NORMAL_ACTIVE;
55045diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
55046index 259a4d5..9b0c9e7 100644
55047--- a/drivers/tty/sysrq.c
55048+++ b/drivers/tty/sysrq.c
55049@@ -1085,7 +1085,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
55050 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
55051 size_t count, loff_t *ppos)
55052 {
55053- if (count) {
55054+ if (count && capable(CAP_SYS_ADMIN)) {
55055 char c;
55056
55057 if (get_user(c, buf))
55058diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
55059index 2bb4dfc..a7f6e86 100644
55060--- a/drivers/tty/tty_io.c
55061+++ b/drivers/tty/tty_io.c
55062@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
55063
55064 void tty_default_fops(struct file_operations *fops)
55065 {
55066- *fops = tty_fops;
55067+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
55068 }
55069
55070 /*
55071diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
55072index 3737f55..7cef448 100644
55073--- a/drivers/tty/tty_ldisc.c
55074+++ b/drivers/tty/tty_ldisc.c
55075@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
55076 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55077 tty_ldiscs[disc] = new_ldisc;
55078 new_ldisc->num = disc;
55079- new_ldisc->refcount = 0;
55080+ atomic_set(&new_ldisc->refcount, 0);
55081 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55082
55083 return ret;
55084@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
55085 return -EINVAL;
55086
55087 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55088- if (tty_ldiscs[disc]->refcount)
55089+ if (atomic_read(&tty_ldiscs[disc]->refcount))
55090 ret = -EBUSY;
55091 else
55092 tty_ldiscs[disc] = NULL;
55093@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
55094 if (ldops) {
55095 ret = ERR_PTR(-EAGAIN);
55096 if (try_module_get(ldops->owner)) {
55097- ldops->refcount++;
55098+ atomic_inc(&ldops->refcount);
55099 ret = ldops;
55100 }
55101 }
55102@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
55103 unsigned long flags;
55104
55105 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55106- ldops->refcount--;
55107+ atomic_dec(&ldops->refcount);
55108 module_put(ldops->owner);
55109 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55110 }
55111diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
55112index 40b31835..94d92ae 100644
55113--- a/drivers/tty/tty_port.c
55114+++ b/drivers/tty/tty_port.c
55115@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
55116 unsigned long flags;
55117
55118 spin_lock_irqsave(&port->lock, flags);
55119- port->count = 0;
55120+ atomic_set(&port->count, 0);
55121 port->flags &= ~ASYNC_NORMAL_ACTIVE;
55122 tty = port->tty;
55123 if (tty)
55124@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55125
55126 /* The port lock protects the port counts */
55127 spin_lock_irqsave(&port->lock, flags);
55128- port->count--;
55129+ atomic_dec(&port->count);
55130 port->blocked_open++;
55131 spin_unlock_irqrestore(&port->lock, flags);
55132
55133@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55134 we must not mess that up further */
55135 spin_lock_irqsave(&port->lock, flags);
55136 if (!tty_hung_up_p(filp))
55137- port->count++;
55138+ atomic_inc(&port->count);
55139 port->blocked_open--;
55140 if (retval == 0)
55141 port->flags |= ASYNC_NORMAL_ACTIVE;
55142@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
55143 return 0;
55144
55145 spin_lock_irqsave(&port->lock, flags);
55146- if (tty->count == 1 && port->count != 1) {
55147+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
55148 printk(KERN_WARNING
55149 "tty_port_close_start: tty->count = 1 port count = %d.\n",
55150- port->count);
55151- port->count = 1;
55152+ atomic_read(&port->count));
55153+ atomic_set(&port->count, 1);
55154 }
55155- if (--port->count < 0) {
55156+ if (atomic_dec_return(&port->count) < 0) {
55157 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
55158- port->count);
55159- port->count = 0;
55160+ atomic_read(&port->count));
55161+ atomic_set(&port->count, 0);
55162 }
55163
55164- if (port->count) {
55165+ if (atomic_read(&port->count)) {
55166 spin_unlock_irqrestore(&port->lock, flags);
55167 return 0;
55168 }
55169@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
55170 struct file *filp)
55171 {
55172 spin_lock_irq(&port->lock);
55173- ++port->count;
55174+ atomic_inc(&port->count);
55175 spin_unlock_irq(&port->lock);
55176 tty_port_tty_set(port, tty);
55177
55178diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
55179index 8a89f6e..50b32af 100644
55180--- a/drivers/tty/vt/keyboard.c
55181+++ b/drivers/tty/vt/keyboard.c
55182@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
55183 kbd->kbdmode == VC_OFF) &&
55184 value != KVAL(K_SAK))
55185 return; /* SAK is allowed even in raw mode */
55186+
55187+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55188+ {
55189+ void *func = fn_handler[value];
55190+ if (func == fn_show_state || func == fn_show_ptregs ||
55191+ func == fn_show_mem)
55192+ return;
55193+ }
55194+#endif
55195+
55196 fn_handler[value](vc);
55197 }
55198
55199@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55200 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
55201 return -EFAULT;
55202
55203- if (!capable(CAP_SYS_TTY_CONFIG))
55204- perm = 0;
55205-
55206 switch (cmd) {
55207 case KDGKBENT:
55208 /* Ensure another thread doesn't free it under us */
55209@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55210 spin_unlock_irqrestore(&kbd_event_lock, flags);
55211 return put_user(val, &user_kbe->kb_value);
55212 case KDSKBENT:
55213+ if (!capable(CAP_SYS_TTY_CONFIG))
55214+ perm = 0;
55215+
55216 if (!perm)
55217 return -EPERM;
55218 if (!i && v == K_NOSUCHMAP) {
55219@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55220 int i, j, k;
55221 int ret;
55222
55223- if (!capable(CAP_SYS_TTY_CONFIG))
55224- perm = 0;
55225-
55226 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
55227 if (!kbs) {
55228 ret = -ENOMEM;
55229@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55230 kfree(kbs);
55231 return ((p && *p) ? -EOVERFLOW : 0);
55232 case KDSKBSENT:
55233+ if (!capable(CAP_SYS_TTY_CONFIG))
55234+ perm = 0;
55235+
55236 if (!perm) {
55237 ret = -EPERM;
55238 goto reterr;
55239diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
55240index 6276f13..84f2449 100644
55241--- a/drivers/uio/uio.c
55242+++ b/drivers/uio/uio.c
55243@@ -25,6 +25,7 @@
55244 #include <linux/kobject.h>
55245 #include <linux/cdev.h>
55246 #include <linux/uio_driver.h>
55247+#include <asm/local.h>
55248
55249 #define UIO_MAX_DEVICES (1U << MINORBITS)
55250
55251@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
55252 struct device_attribute *attr, char *buf)
55253 {
55254 struct uio_device *idev = dev_get_drvdata(dev);
55255- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
55256+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
55257 }
55258 static DEVICE_ATTR_RO(event);
55259
55260@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
55261 {
55262 struct uio_device *idev = info->uio_dev;
55263
55264- atomic_inc(&idev->event);
55265+ atomic_inc_unchecked(&idev->event);
55266 wake_up_interruptible(&idev->wait);
55267 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55268 }
55269@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55270 }
55271
55272 listener->dev = idev;
55273- listener->event_count = atomic_read(&idev->event);
55274+ listener->event_count = atomic_read_unchecked(&idev->event);
55275 filep->private_data = listener;
55276
55277 if (idev->info->open) {
55278@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55279 return -EIO;
55280
55281 poll_wait(filep, &idev->wait, wait);
55282- if (listener->event_count != atomic_read(&idev->event))
55283+ if (listener->event_count != atomic_read_unchecked(&idev->event))
55284 return POLLIN | POLLRDNORM;
55285 return 0;
55286 }
55287@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55288 do {
55289 set_current_state(TASK_INTERRUPTIBLE);
55290
55291- event_count = atomic_read(&idev->event);
55292+ event_count = atomic_read_unchecked(&idev->event);
55293 if (event_count != listener->event_count) {
55294 if (copy_to_user(buf, &event_count, count))
55295 retval = -EFAULT;
55296@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55297 static int uio_find_mem_index(struct vm_area_struct *vma)
55298 {
55299 struct uio_device *idev = vma->vm_private_data;
55300+ unsigned long size;
55301
55302 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55303- if (idev->info->mem[vma->vm_pgoff].size == 0)
55304+ size = idev->info->mem[vma->vm_pgoff].size;
55305+ if (size == 0)
55306+ return -1;
55307+ if (vma->vm_end - vma->vm_start > size)
55308 return -1;
55309 return (int)vma->vm_pgoff;
55310 }
55311@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
55312 idev->owner = owner;
55313 idev->info = info;
55314 init_waitqueue_head(&idev->wait);
55315- atomic_set(&idev->event, 0);
55316+ atomic_set_unchecked(&idev->event, 0);
55317
55318 ret = uio_get_minor(idev);
55319 if (ret)
55320diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55321index 813d4d3..a71934f 100644
55322--- a/drivers/usb/atm/cxacru.c
55323+++ b/drivers/usb/atm/cxacru.c
55324@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55325 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55326 if (ret < 2)
55327 return -EINVAL;
55328- if (index < 0 || index > 0x7f)
55329+ if (index > 0x7f)
55330 return -EINVAL;
55331 pos += tmp;
55332
55333diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55334index dada014..1d0d517 100644
55335--- a/drivers/usb/atm/usbatm.c
55336+++ b/drivers/usb/atm/usbatm.c
55337@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55338 if (printk_ratelimit())
55339 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55340 __func__, vpi, vci);
55341- atomic_inc(&vcc->stats->rx_err);
55342+ atomic_inc_unchecked(&vcc->stats->rx_err);
55343 return;
55344 }
55345
55346@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55347 if (length > ATM_MAX_AAL5_PDU) {
55348 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55349 __func__, length, vcc);
55350- atomic_inc(&vcc->stats->rx_err);
55351+ atomic_inc_unchecked(&vcc->stats->rx_err);
55352 goto out;
55353 }
55354
55355@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55356 if (sarb->len < pdu_length) {
55357 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55358 __func__, pdu_length, sarb->len, vcc);
55359- atomic_inc(&vcc->stats->rx_err);
55360+ atomic_inc_unchecked(&vcc->stats->rx_err);
55361 goto out;
55362 }
55363
55364 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55365 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55366 __func__, vcc);
55367- atomic_inc(&vcc->stats->rx_err);
55368+ atomic_inc_unchecked(&vcc->stats->rx_err);
55369 goto out;
55370 }
55371
55372@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55373 if (printk_ratelimit())
55374 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55375 __func__, length);
55376- atomic_inc(&vcc->stats->rx_drop);
55377+ atomic_inc_unchecked(&vcc->stats->rx_drop);
55378 goto out;
55379 }
55380
55381@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55382
55383 vcc->push(vcc, skb);
55384
55385- atomic_inc(&vcc->stats->rx);
55386+ atomic_inc_unchecked(&vcc->stats->rx);
55387 out:
55388 skb_trim(sarb, 0);
55389 }
55390@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55391 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55392
55393 usbatm_pop(vcc, skb);
55394- atomic_inc(&vcc->stats->tx);
55395+ atomic_inc_unchecked(&vcc->stats->tx);
55396
55397 skb = skb_dequeue(&instance->sndqueue);
55398 }
55399@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55400 if (!left--)
55401 return sprintf(page,
55402 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55403- atomic_read(&atm_dev->stats.aal5.tx),
55404- atomic_read(&atm_dev->stats.aal5.tx_err),
55405- atomic_read(&atm_dev->stats.aal5.rx),
55406- atomic_read(&atm_dev->stats.aal5.rx_err),
55407- atomic_read(&atm_dev->stats.aal5.rx_drop));
55408+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55409+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55410+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55411+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55412+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55413
55414 if (!left--) {
55415 if (instance->disconnected)
55416diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55417index 2a3bbdf..91d72cf 100644
55418--- a/drivers/usb/core/devices.c
55419+++ b/drivers/usb/core/devices.c
55420@@ -126,7 +126,7 @@ static const char format_endpt[] =
55421 * time it gets called.
55422 */
55423 static struct device_connect_event {
55424- atomic_t count;
55425+ atomic_unchecked_t count;
55426 wait_queue_head_t wait;
55427 } device_event = {
55428 .count = ATOMIC_INIT(1),
55429@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55430
55431 void usbfs_conn_disc_event(void)
55432 {
55433- atomic_add(2, &device_event.count);
55434+ atomic_add_unchecked(2, &device_event.count);
55435 wake_up(&device_event.wait);
55436 }
55437
55438@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55439
55440 poll_wait(file, &device_event.wait, wait);
55441
55442- event_count = atomic_read(&device_event.count);
55443+ event_count = atomic_read_unchecked(&device_event.count);
55444 if (file->f_version != event_count) {
55445 file->f_version = event_count;
55446 return POLLIN | POLLRDNORM;
55447diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55448index 1163553..f292679 100644
55449--- a/drivers/usb/core/devio.c
55450+++ b/drivers/usb/core/devio.c
55451@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55452 struct usb_dev_state *ps = file->private_data;
55453 struct usb_device *dev = ps->dev;
55454 ssize_t ret = 0;
55455- unsigned len;
55456+ size_t len;
55457 loff_t pos;
55458 int i;
55459
55460@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55461 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55462 struct usb_config_descriptor *config =
55463 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55464- unsigned int length = le16_to_cpu(config->wTotalLength);
55465+ size_t length = le16_to_cpu(config->wTotalLength);
55466
55467 if (*ppos < pos + length) {
55468
55469 /* The descriptor may claim to be longer than it
55470 * really is. Here is the actual allocated length. */
55471- unsigned alloclen =
55472+ size_t alloclen =
55473 le16_to_cpu(dev->config[i].desc.wTotalLength);
55474
55475- len = length - (*ppos - pos);
55476+ len = length + pos - *ppos;
55477 if (len > nbytes)
55478 len = nbytes;
55479
55480 /* Simply don't write (skip over) unallocated parts */
55481 if (alloclen > (*ppos - pos)) {
55482- alloclen -= (*ppos - pos);
55483+ alloclen = alloclen + pos - *ppos;
55484 if (copy_to_user(buf,
55485 dev->rawdescriptors[i] + (*ppos - pos),
55486 min(len, alloclen))) {
55487diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55488index 45a915c..09f9735 100644
55489--- a/drivers/usb/core/hcd.c
55490+++ b/drivers/usb/core/hcd.c
55491@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55492 */
55493 usb_get_urb(urb);
55494 atomic_inc(&urb->use_count);
55495- atomic_inc(&urb->dev->urbnum);
55496+ atomic_inc_unchecked(&urb->dev->urbnum);
55497 usbmon_urb_submit(&hcd->self, urb);
55498
55499 /* NOTE requirements on root-hub callers (usbfs and the hub
55500@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55501 urb->hcpriv = NULL;
55502 INIT_LIST_HEAD(&urb->urb_list);
55503 atomic_dec(&urb->use_count);
55504- atomic_dec(&urb->dev->urbnum);
55505+ atomic_dec_unchecked(&urb->dev->urbnum);
55506 if (atomic_read(&urb->reject))
55507 wake_up(&usb_kill_urb_queue);
55508 usb_put_urb(urb);
55509diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55510index 3b71516..1f26579 100644
55511--- a/drivers/usb/core/hub.c
55512+++ b/drivers/usb/core/hub.c
55513@@ -26,6 +26,7 @@
55514 #include <linux/mutex.h>
55515 #include <linux/random.h>
55516 #include <linux/pm_qos.h>
55517+#include <linux/grsecurity.h>
55518
55519 #include <asm/uaccess.h>
55520 #include <asm/byteorder.h>
55521@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55522 goto done;
55523 return;
55524 }
55525+
55526+ if (gr_handle_new_usb())
55527+ goto done;
55528+
55529 if (hub_is_superspeed(hub->hdev))
55530 unit_load = 150;
55531 else
55532diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55533index f368d20..0c30ac5 100644
55534--- a/drivers/usb/core/message.c
55535+++ b/drivers/usb/core/message.c
55536@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55537 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55538 * error number.
55539 */
55540-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55541+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55542 __u8 requesttype, __u16 value, __u16 index, void *data,
55543 __u16 size, int timeout)
55544 {
55545@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55546 * If successful, 0. Otherwise a negative error number. The number of actual
55547 * bytes transferred will be stored in the @actual_length parameter.
55548 */
55549-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55550+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55551 void *data, int len, int *actual_length, int timeout)
55552 {
55553 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55554@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55555 * bytes transferred will be stored in the @actual_length parameter.
55556 *
55557 */
55558-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55559+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55560 void *data, int len, int *actual_length, int timeout)
55561 {
55562 struct urb *urb;
55563diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55564index d269738..7340cd7 100644
55565--- a/drivers/usb/core/sysfs.c
55566+++ b/drivers/usb/core/sysfs.c
55567@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55568 struct usb_device *udev;
55569
55570 udev = to_usb_device(dev);
55571- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55572+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55573 }
55574 static DEVICE_ATTR_RO(urbnum);
55575
55576diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55577index b1fb9ae..4224885 100644
55578--- a/drivers/usb/core/usb.c
55579+++ b/drivers/usb/core/usb.c
55580@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55581 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55582 dev->state = USB_STATE_ATTACHED;
55583 dev->lpm_disable_count = 1;
55584- atomic_set(&dev->urbnum, 0);
55585+ atomic_set_unchecked(&dev->urbnum, 0);
55586
55587 INIT_LIST_HEAD(&dev->ep0.urb_list);
55588 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55589diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55590index 8cfc319..4868255 100644
55591--- a/drivers/usb/early/ehci-dbgp.c
55592+++ b/drivers/usb/early/ehci-dbgp.c
55593@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55594
55595 #ifdef CONFIG_KGDB
55596 static struct kgdb_io kgdbdbgp_io_ops;
55597-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55598+static struct kgdb_io kgdbdbgp_io_ops_console;
55599+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55600 #else
55601 #define dbgp_kgdb_mode (0)
55602 #endif
55603@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55604 .write_char = kgdbdbgp_write_char,
55605 };
55606
55607+static struct kgdb_io kgdbdbgp_io_ops_console = {
55608+ .name = "kgdbdbgp",
55609+ .read_char = kgdbdbgp_read_char,
55610+ .write_char = kgdbdbgp_write_char,
55611+ .is_console = 1
55612+};
55613+
55614 static int kgdbdbgp_wait_time;
55615
55616 static int __init kgdbdbgp_parse_config(char *str)
55617@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55618 ptr++;
55619 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55620 }
55621- kgdb_register_io_module(&kgdbdbgp_io_ops);
55622- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55623+ if (early_dbgp_console.index != -1)
55624+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55625+ else
55626+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55627
55628 return 0;
55629 }
55630diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
55631index 9719abf..789d5d9 100644
55632--- a/drivers/usb/gadget/function/f_uac1.c
55633+++ b/drivers/usb/gadget/function/f_uac1.c
55634@@ -14,6 +14,7 @@
55635 #include <linux/module.h>
55636 #include <linux/device.h>
55637 #include <linux/atomic.h>
55638+#include <linux/module.h>
55639
55640 #include "u_uac1.h"
55641
55642diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
55643index 491082a..dfd7d17 100644
55644--- a/drivers/usb/gadget/function/u_serial.c
55645+++ b/drivers/usb/gadget/function/u_serial.c
55646@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55647 spin_lock_irq(&port->port_lock);
55648
55649 /* already open? Great. */
55650- if (port->port.count) {
55651+ if (atomic_read(&port->port.count)) {
55652 status = 0;
55653- port->port.count++;
55654+ atomic_inc(&port->port.count);
55655
55656 /* currently opening/closing? wait ... */
55657 } else if (port->openclose) {
55658@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55659 tty->driver_data = port;
55660 port->port.tty = tty;
55661
55662- port->port.count = 1;
55663+ atomic_set(&port->port.count, 1);
55664 port->openclose = false;
55665
55666 /* if connected, start the I/O stream */
55667@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55668
55669 spin_lock_irq(&port->port_lock);
55670
55671- if (port->port.count != 1) {
55672- if (port->port.count == 0)
55673+ if (atomic_read(&port->port.count) != 1) {
55674+ if (atomic_read(&port->port.count) == 0)
55675 WARN_ON(1);
55676 else
55677- --port->port.count;
55678+ atomic_dec(&port->port.count);
55679 goto exit;
55680 }
55681
55682@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55683 * and sleep if necessary
55684 */
55685 port->openclose = true;
55686- port->port.count = 0;
55687+ atomic_set(&port->port.count, 0);
55688
55689 gser = port->port_usb;
55690 if (gser && gser->disconnect)
55691@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
55692 int cond;
55693
55694 spin_lock_irq(&port->port_lock);
55695- cond = (port->port.count == 0) && !port->openclose;
55696+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55697 spin_unlock_irq(&port->port_lock);
55698 return cond;
55699 }
55700@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55701 /* if it's already open, start I/O ... and notify the serial
55702 * protocol about open/close status (connect/disconnect).
55703 */
55704- if (port->port.count) {
55705+ if (atomic_read(&port->port.count)) {
55706 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55707 gs_start_io(port);
55708 if (gser->connect)
55709@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
55710
55711 port->port_usb = NULL;
55712 gser->ioport = NULL;
55713- if (port->port.count > 0 || port->openclose) {
55714+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55715 wake_up_interruptible(&port->drain_wait);
55716 if (port->port.tty)
55717 tty_hangup(port->port.tty);
55718@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
55719
55720 /* finally, free any unused/unusable I/O buffers */
55721 spin_lock_irqsave(&port->port_lock, flags);
55722- if (port->port.count == 0 && !port->openclose)
55723+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55724 gs_buf_free(&port->port_write_buf);
55725 gs_free_requests(gser->out, &port->read_pool, NULL);
55726 gs_free_requests(gser->out, &port->read_queue, NULL);
55727diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
55728index c78c841..48fd281 100644
55729--- a/drivers/usb/gadget/function/u_uac1.c
55730+++ b/drivers/usb/gadget/function/u_uac1.c
55731@@ -17,6 +17,7 @@
55732 #include <linux/ctype.h>
55733 #include <linux/random.h>
55734 #include <linux/syscalls.h>
55735+#include <linux/module.h>
55736
55737 #include "u_uac1.h"
55738
55739diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55740index 7354d01..299478e 100644
55741--- a/drivers/usb/host/ehci-hub.c
55742+++ b/drivers/usb/host/ehci-hub.c
55743@@ -772,7 +772,7 @@ static struct urb *request_single_step_set_feature_urb(
55744 urb->transfer_flags = URB_DIR_IN;
55745 usb_get_urb(urb);
55746 atomic_inc(&urb->use_count);
55747- atomic_inc(&urb->dev->urbnum);
55748+ atomic_inc_unchecked(&urb->dev->urbnum);
55749 urb->setup_dma = dma_map_single(
55750 hcd->self.controller,
55751 urb->setup_packet,
55752@@ -839,7 +839,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55753 urb->status = -EINPROGRESS;
55754 usb_get_urb(urb);
55755 atomic_inc(&urb->use_count);
55756- atomic_inc(&urb->dev->urbnum);
55757+ atomic_inc_unchecked(&urb->dev->urbnum);
55758 retval = submit_single_step_set_feature(hcd, urb, 0);
55759 if (!retval && !wait_for_completion_timeout(&done,
55760 msecs_to_jiffies(2000))) {
55761diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55762index 1db0626..4948782 100644
55763--- a/drivers/usb/host/hwa-hc.c
55764+++ b/drivers/usb/host/hwa-hc.c
55765@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55766 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55767 struct wahc *wa = &hwahc->wa;
55768 struct device *dev = &wa->usb_iface->dev;
55769- u8 mas_le[UWB_NUM_MAS/8];
55770+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55771+
55772+ if (mas_le == NULL)
55773+ return -ENOMEM;
55774
55775 /* Set the stream index */
55776 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55777@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55778 WUSB_REQ_SET_WUSB_MAS,
55779 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55780 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55781- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55782+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55783 if (result < 0)
55784 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55785 out:
55786+ kfree(mas_le);
55787+
55788 return result;
55789 }
55790
55791diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55792index b3d245e..99549ed 100644
55793--- a/drivers/usb/misc/appledisplay.c
55794+++ b/drivers/usb/misc/appledisplay.c
55795@@ -84,7 +84,7 @@ struct appledisplay {
55796 struct mutex sysfslock; /* concurrent read and write */
55797 };
55798
55799-static atomic_t count_displays = ATOMIC_INIT(0);
55800+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55801 static struct workqueue_struct *wq;
55802
55803 static void appledisplay_complete(struct urb *urb)
55804@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55805
55806 /* Register backlight device */
55807 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55808- atomic_inc_return(&count_displays) - 1);
55809+ atomic_inc_return_unchecked(&count_displays) - 1);
55810 memset(&props, 0, sizeof(struct backlight_properties));
55811 props.type = BACKLIGHT_RAW;
55812 props.max_brightness = 0xff;
55813diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55814index 3806e70..55c508b 100644
55815--- a/drivers/usb/serial/console.c
55816+++ b/drivers/usb/serial/console.c
55817@@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options)
55818
55819 info->port = port;
55820
55821- ++port->port.count;
55822+ atomic_inc(&port->port.count);
55823 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55824 if (serial->type->set_termios) {
55825 /*
55826@@ -175,7 +175,7 @@ static int usb_console_setup(struct console *co, char *options)
55827 }
55828 /* Now that any required fake tty operations are completed restore
55829 * the tty port count */
55830- --port->port.count;
55831+ atomic_dec(&port->port.count);
55832 /* The console is special in terms of closing the device so
55833 * indicate this port is now acting as a system console. */
55834 port->port.console = 1;
55835@@ -188,7 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
55836 put_tty:
55837 tty_kref_put(tty);
55838 reset_open_count:
55839- port->port.count = 0;
55840+ atomic_set(&port->port.count, 0);
55841 usb_autopm_put_interface(serial->interface);
55842 error_get_interface:
55843 usb_serial_put(serial);
55844@@ -199,7 +199,7 @@ static int usb_console_setup(struct console *co, char *options)
55845 static void usb_console_write(struct console *co,
55846 const char *buf, unsigned count)
55847 {
55848- static struct usbcons_info *info = &usbcons_info;
55849+ struct usbcons_info *info = &usbcons_info;
55850 struct usb_serial_port *port = info->port;
55851 struct usb_serial *serial;
55852 int retval = -ENODEV;
55853diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55854index 307e339..6aa97cb 100644
55855--- a/drivers/usb/storage/usb.h
55856+++ b/drivers/usb/storage/usb.h
55857@@ -63,7 +63,7 @@ struct us_unusual_dev {
55858 __u8 useProtocol;
55859 __u8 useTransport;
55860 int (*initFunction)(struct us_data *);
55861-};
55862+} __do_const;
55863
55864
55865 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
55866diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
55867index a863a98..d272795 100644
55868--- a/drivers/usb/usbip/vhci.h
55869+++ b/drivers/usb/usbip/vhci.h
55870@@ -83,7 +83,7 @@ struct vhci_hcd {
55871 unsigned resuming:1;
55872 unsigned long re_timeout;
55873
55874- atomic_t seqnum;
55875+ atomic_unchecked_t seqnum;
55876
55877 /*
55878 * NOTE:
55879diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
55880index 11f6f61..1087910 100644
55881--- a/drivers/usb/usbip/vhci_hcd.c
55882+++ b/drivers/usb/usbip/vhci_hcd.c
55883@@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb)
55884
55885 spin_lock(&vdev->priv_lock);
55886
55887- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
55888+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55889 if (priv->seqnum == 0xffff)
55890 dev_info(&urb->dev->dev, "seqnum max\n");
55891
55892@@ -685,7 +685,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
55893 return -ENOMEM;
55894 }
55895
55896- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
55897+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55898 if (unlink->seqnum == 0xffff)
55899 pr_info("seqnum max\n");
55900
55901@@ -889,7 +889,7 @@ static int vhci_start(struct usb_hcd *hcd)
55902 vdev->rhport = rhport;
55903 }
55904
55905- atomic_set(&vhci->seqnum, 0);
55906+ atomic_set_unchecked(&vhci->seqnum, 0);
55907 spin_lock_init(&vhci->lock);
55908
55909 hcd->power_budget = 0; /* no limit */
55910diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
55911index 00e4a54..d676f85 100644
55912--- a/drivers/usb/usbip/vhci_rx.c
55913+++ b/drivers/usb/usbip/vhci_rx.c
55914@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
55915 if (!urb) {
55916 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
55917 pr_info("max seqnum %d\n",
55918- atomic_read(&the_controller->seqnum));
55919+ atomic_read_unchecked(&the_controller->seqnum));
55920 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
55921 return;
55922 }
55923diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
55924index edc7267..9f65ce2 100644
55925--- a/drivers/usb/wusbcore/wa-hc.h
55926+++ b/drivers/usb/wusbcore/wa-hc.h
55927@@ -240,7 +240,7 @@ struct wahc {
55928 spinlock_t xfer_list_lock;
55929 struct work_struct xfer_enqueue_work;
55930 struct work_struct xfer_error_work;
55931- atomic_t xfer_id_count;
55932+ atomic_unchecked_t xfer_id_count;
55933
55934 kernel_ulong_t quirks;
55935 };
55936@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
55937 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
55938 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
55939 wa->dto_in_use = 0;
55940- atomic_set(&wa->xfer_id_count, 1);
55941+ atomic_set_unchecked(&wa->xfer_id_count, 1);
55942 /* init the buf in URBs */
55943 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
55944 usb_init_urb(&(wa->buf_in_urbs[index]));
55945diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
55946index 69af4fd..da390d7 100644
55947--- a/drivers/usb/wusbcore/wa-xfer.c
55948+++ b/drivers/usb/wusbcore/wa-xfer.c
55949@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
55950 */
55951 static void wa_xfer_id_init(struct wa_xfer *xfer)
55952 {
55953- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
55954+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
55955 }
55956
55957 /* Return the xfer's ID. */
55958diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
55959index 837d177..170724af 100644
55960--- a/drivers/vfio/vfio.c
55961+++ b/drivers/vfio/vfio.c
55962@@ -518,7 +518,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
55963 return 0;
55964
55965 /* TODO Prevent device auto probing */
55966- WARN("Device %s added to live group %d!\n", dev_name(dev),
55967+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
55968 iommu_group_id(group->iommu_group));
55969
55970 return 0;
55971diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
55972index 3bb02c6..a01ff38 100644
55973--- a/drivers/vhost/vringh.c
55974+++ b/drivers/vhost/vringh.c
55975@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
55976 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
55977 {
55978 __virtio16 v = 0;
55979- int rc = get_user(v, (__force __virtio16 __user *)p);
55980+ int rc = get_user(v, (__force_user __virtio16 *)p);
55981 *val = vringh16_to_cpu(vrh, v);
55982 return rc;
55983 }
55984@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
55985 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
55986 {
55987 __virtio16 v = cpu_to_vringh16(vrh, val);
55988- return put_user(v, (__force __virtio16 __user *)p);
55989+ return put_user(v, (__force_user __virtio16 *)p);
55990 }
55991
55992 static inline int copydesc_user(void *dst, const void *src, size_t len)
55993 {
55994- return copy_from_user(dst, (__force void __user *)src, len) ?
55995+ return copy_from_user(dst, (void __force_user *)src, len) ?
55996 -EFAULT : 0;
55997 }
55998
55999@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
56000 const struct vring_used_elem *src,
56001 unsigned int num)
56002 {
56003- return copy_to_user((__force void __user *)dst, src,
56004+ return copy_to_user((void __force_user *)dst, src,
56005 sizeof(*dst) * num) ? -EFAULT : 0;
56006 }
56007
56008 static inline int xfer_from_user(void *src, void *dst, size_t len)
56009 {
56010- return copy_from_user(dst, (__force void __user *)src, len) ?
56011+ return copy_from_user(dst, (void __force_user *)src, len) ?
56012 -EFAULT : 0;
56013 }
56014
56015 static inline int xfer_to_user(void *dst, void *src, size_t len)
56016 {
56017- return copy_to_user((__force void __user *)dst, src, len) ?
56018+ return copy_to_user((void __force_user *)dst, src, len) ?
56019 -EFAULT : 0;
56020 }
56021
56022@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
56023 vrh->last_used_idx = 0;
56024 vrh->vring.num = num;
56025 /* vring expects kernel addresses, but only used via accessors. */
56026- vrh->vring.desc = (__force struct vring_desc *)desc;
56027- vrh->vring.avail = (__force struct vring_avail *)avail;
56028- vrh->vring.used = (__force struct vring_used *)used;
56029+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
56030+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
56031+ vrh->vring.used = (__force_kernel struct vring_used *)used;
56032 return 0;
56033 }
56034 EXPORT_SYMBOL(vringh_init_user);
56035@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
56036
56037 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
56038 {
56039- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
56040+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
56041 return 0;
56042 }
56043
56044diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
56045index 84a110a..96312c3 100644
56046--- a/drivers/video/backlight/kb3886_bl.c
56047+++ b/drivers/video/backlight/kb3886_bl.c
56048@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
56049 static unsigned long kb3886bl_flags;
56050 #define KB3886BL_SUSPENDED 0x01
56051
56052-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
56053+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
56054 {
56055 .ident = "Sahara Touch-iT",
56056 .matches = {
56057diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
56058index 1b0b233..6f34c2c 100644
56059--- a/drivers/video/fbdev/arcfb.c
56060+++ b/drivers/video/fbdev/arcfb.c
56061@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
56062 return -ENOSPC;
56063
56064 err = 0;
56065- if ((count + p) > fbmemlength) {
56066+ if (count > (fbmemlength - p)) {
56067 count = fbmemlength - p;
56068 err = -ENOSPC;
56069 }
56070diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
56071index aedf2fb..47c9aca 100644
56072--- a/drivers/video/fbdev/aty/aty128fb.c
56073+++ b/drivers/video/fbdev/aty/aty128fb.c
56074@@ -149,7 +149,7 @@ enum {
56075 };
56076
56077 /* Must match above enum */
56078-static char * const r128_family[] = {
56079+static const char * const r128_family[] = {
56080 "AGP",
56081 "PCI",
56082 "PRO AGP",
56083diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
56084index 8789e48..698fe4c 100644
56085--- a/drivers/video/fbdev/aty/atyfb_base.c
56086+++ b/drivers/video/fbdev/aty/atyfb_base.c
56087@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
56088 par->accel_flags = var->accel_flags; /* hack */
56089
56090 if (var->accel_flags) {
56091- info->fbops->fb_sync = atyfb_sync;
56092+ pax_open_kernel();
56093+ *(void **)&info->fbops->fb_sync = atyfb_sync;
56094+ pax_close_kernel();
56095 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56096 } else {
56097- info->fbops->fb_sync = NULL;
56098+ pax_open_kernel();
56099+ *(void **)&info->fbops->fb_sync = NULL;
56100+ pax_close_kernel();
56101 info->flags |= FBINFO_HWACCEL_DISABLED;
56102 }
56103
56104diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
56105index 2fa0317..4983f2a 100644
56106--- a/drivers/video/fbdev/aty/mach64_cursor.c
56107+++ b/drivers/video/fbdev/aty/mach64_cursor.c
56108@@ -8,6 +8,7 @@
56109 #include "../core/fb_draw.h"
56110
56111 #include <asm/io.h>
56112+#include <asm/pgtable.h>
56113
56114 #ifdef __sparc__
56115 #include <asm/fbio.h>
56116@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
56117 info->sprite.buf_align = 16; /* and 64 lines tall. */
56118 info->sprite.flags = FB_PIXMAP_IO;
56119
56120- info->fbops->fb_cursor = atyfb_cursor;
56121+ pax_open_kernel();
56122+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
56123+ pax_close_kernel();
56124
56125 return 0;
56126 }
56127diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
56128index d6cab1f..112f680 100644
56129--- a/drivers/video/fbdev/core/fb_defio.c
56130+++ b/drivers/video/fbdev/core/fb_defio.c
56131@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
56132
56133 BUG_ON(!fbdefio);
56134 mutex_init(&fbdefio->lock);
56135- info->fbops->fb_mmap = fb_deferred_io_mmap;
56136+ pax_open_kernel();
56137+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
56138+ pax_close_kernel();
56139 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
56140 INIT_LIST_HEAD(&fbdefio->pagelist);
56141 if (fbdefio->delay == 0) /* set a default of 1 s */
56142@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
56143 page->mapping = NULL;
56144 }
56145
56146- info->fbops->fb_mmap = NULL;
56147+ *(void **)&info->fbops->fb_mmap = NULL;
56148 mutex_destroy(&fbdefio->lock);
56149 }
56150 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
56151diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
56152index 0705d88..d9429bf 100644
56153--- a/drivers/video/fbdev/core/fbmem.c
56154+++ b/drivers/video/fbdev/core/fbmem.c
56155@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
56156 __u32 data;
56157 int err;
56158
56159- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
56160+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
56161
56162 data = (__u32) (unsigned long) fix->smem_start;
56163 err |= put_user(data, &fix32->smem_start);
56164diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
56165index 4254336..282567e 100644
56166--- a/drivers/video/fbdev/hyperv_fb.c
56167+++ b/drivers/video/fbdev/hyperv_fb.c
56168@@ -240,7 +240,7 @@ static uint screen_fb_size;
56169 static inline int synthvid_send(struct hv_device *hdev,
56170 struct synthvid_msg *msg)
56171 {
56172- static atomic64_t request_id = ATOMIC64_INIT(0);
56173+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
56174 int ret;
56175
56176 msg->pipe_hdr.type = PIPE_MSG_DATA;
56177@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
56178
56179 ret = vmbus_sendpacket(hdev->channel, msg,
56180 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
56181- atomic64_inc_return(&request_id),
56182+ atomic64_inc_return_unchecked(&request_id),
56183 VM_PKT_DATA_INBAND, 0);
56184
56185 if (ret)
56186diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
56187index 7672d2e..b56437f 100644
56188--- a/drivers/video/fbdev/i810/i810_accel.c
56189+++ b/drivers/video/fbdev/i810/i810_accel.c
56190@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
56191 }
56192 }
56193 printk("ringbuffer lockup!!!\n");
56194+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
56195 i810_report_error(mmio);
56196 par->dev_flags |= LOCKUP;
56197 info->pixmap.scan_align = 1;
56198diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56199index a01147f..5d896f8 100644
56200--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56201+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56202@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
56203
56204 #ifdef CONFIG_FB_MATROX_MYSTIQUE
56205 struct matrox_switch matrox_mystique = {
56206- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
56207+ .preinit = MGA1064_preinit,
56208+ .reset = MGA1064_reset,
56209+ .init = MGA1064_init,
56210+ .restore = MGA1064_restore,
56211 };
56212 EXPORT_SYMBOL(matrox_mystique);
56213 #endif
56214
56215 #ifdef CONFIG_FB_MATROX_G
56216 struct matrox_switch matrox_G100 = {
56217- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
56218+ .preinit = MGAG100_preinit,
56219+ .reset = MGAG100_reset,
56220+ .init = MGAG100_init,
56221+ .restore = MGAG100_restore,
56222 };
56223 EXPORT_SYMBOL(matrox_G100);
56224 #endif
56225diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56226index 195ad7c..09743fc 100644
56227--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56228+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56229@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
56230 }
56231
56232 struct matrox_switch matrox_millennium = {
56233- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
56234+ .preinit = Ti3026_preinit,
56235+ .reset = Ti3026_reset,
56236+ .init = Ti3026_init,
56237+ .restore = Ti3026_restore
56238 };
56239 EXPORT_SYMBOL(matrox_millennium);
56240 #endif
56241diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56242index fe92eed..106e085 100644
56243--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56244+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56245@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
56246 struct mb862xxfb_par *par = info->par;
56247
56248 if (info->var.bits_per_pixel == 32) {
56249- info->fbops->fb_fillrect = cfb_fillrect;
56250- info->fbops->fb_copyarea = cfb_copyarea;
56251- info->fbops->fb_imageblit = cfb_imageblit;
56252+ pax_open_kernel();
56253+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56254+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56255+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56256+ pax_close_kernel();
56257 } else {
56258 outreg(disp, GC_L0EM, 3);
56259- info->fbops->fb_fillrect = mb86290fb_fillrect;
56260- info->fbops->fb_copyarea = mb86290fb_copyarea;
56261- info->fbops->fb_imageblit = mb86290fb_imageblit;
56262+ pax_open_kernel();
56263+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
56264+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
56265+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
56266+ pax_close_kernel();
56267 }
56268 outreg(draw, GDC_REG_DRAW_BASE, 0);
56269 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
56270diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
56271index def0412..fed6529 100644
56272--- a/drivers/video/fbdev/nvidia/nvidia.c
56273+++ b/drivers/video/fbdev/nvidia/nvidia.c
56274@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
56275 info->fix.line_length = (info->var.xres_virtual *
56276 info->var.bits_per_pixel) >> 3;
56277 if (info->var.accel_flags) {
56278- info->fbops->fb_imageblit = nvidiafb_imageblit;
56279- info->fbops->fb_fillrect = nvidiafb_fillrect;
56280- info->fbops->fb_copyarea = nvidiafb_copyarea;
56281- info->fbops->fb_sync = nvidiafb_sync;
56282+ pax_open_kernel();
56283+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
56284+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
56285+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
56286+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
56287+ pax_close_kernel();
56288 info->pixmap.scan_align = 4;
56289 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56290 info->flags |= FBINFO_READS_FAST;
56291 NVResetGraphics(info);
56292 } else {
56293- info->fbops->fb_imageblit = cfb_imageblit;
56294- info->fbops->fb_fillrect = cfb_fillrect;
56295- info->fbops->fb_copyarea = cfb_copyarea;
56296- info->fbops->fb_sync = NULL;
56297+ pax_open_kernel();
56298+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56299+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56300+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56301+ *(void **)&info->fbops->fb_sync = NULL;
56302+ pax_close_kernel();
56303 info->pixmap.scan_align = 1;
56304 info->flags |= FBINFO_HWACCEL_DISABLED;
56305 info->flags &= ~FBINFO_READS_FAST;
56306@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
56307 info->pixmap.size = 8 * 1024;
56308 info->pixmap.flags = FB_PIXMAP_SYSTEM;
56309
56310- if (!hwcur)
56311- info->fbops->fb_cursor = NULL;
56312+ if (!hwcur) {
56313+ pax_open_kernel();
56314+ *(void **)&info->fbops->fb_cursor = NULL;
56315+ pax_close_kernel();
56316+ }
56317
56318 info->var.accel_flags = (!noaccel);
56319
56320diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56321index 2412a0d..294215b 100644
56322--- a/drivers/video/fbdev/omap2/dss/display.c
56323+++ b/drivers/video/fbdev/omap2/dss/display.c
56324@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56325 if (dssdev->name == NULL)
56326 dssdev->name = dssdev->alias;
56327
56328+ pax_open_kernel();
56329 if (drv && drv->get_resolution == NULL)
56330- drv->get_resolution = omapdss_default_get_resolution;
56331+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56332 if (drv && drv->get_recommended_bpp == NULL)
56333- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56334+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56335 if (drv && drv->get_timings == NULL)
56336- drv->get_timings = omapdss_default_get_timings;
56337+ *(void **)&drv->get_timings = omapdss_default_get_timings;
56338+ pax_close_kernel();
56339
56340 mutex_lock(&panel_list_mutex);
56341 list_add_tail(&dssdev->panel_list, &panel_list);
56342diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56343index 83433cb..71e9b98 100644
56344--- a/drivers/video/fbdev/s1d13xxxfb.c
56345+++ b/drivers/video/fbdev/s1d13xxxfb.c
56346@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56347
56348 switch(prod_id) {
56349 case S1D13506_PROD_ID: /* activate acceleration */
56350- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56351- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56352+ pax_open_kernel();
56353+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56354+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56355+ pax_close_kernel();
56356 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56357 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56358 break;
56359diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56360index d3013cd..95b8285 100644
56361--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56362+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56363@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56364 }
56365
56366 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56367- lcdc_sys_write_index,
56368- lcdc_sys_write_data,
56369- lcdc_sys_read_data,
56370+ .write_index = lcdc_sys_write_index,
56371+ .write_data = lcdc_sys_write_data,
56372+ .read_data = lcdc_sys_read_data,
56373 };
56374
56375 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56376diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56377index 9279e5f..d5f5276 100644
56378--- a/drivers/video/fbdev/smscufx.c
56379+++ b/drivers/video/fbdev/smscufx.c
56380@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56381 fb_deferred_io_cleanup(info);
56382 kfree(info->fbdefio);
56383 info->fbdefio = NULL;
56384- info->fbops->fb_mmap = ufx_ops_mmap;
56385+ pax_open_kernel();
56386+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56387+ pax_close_kernel();
56388 }
56389
56390 pr_debug("released /dev/fb%d user=%d count=%d",
56391diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56392index ff2b873..626a8d5 100644
56393--- a/drivers/video/fbdev/udlfb.c
56394+++ b/drivers/video/fbdev/udlfb.c
56395@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56396 dlfb_urb_completion(urb);
56397
56398 error:
56399- atomic_add(bytes_sent, &dev->bytes_sent);
56400- atomic_add(bytes_identical, &dev->bytes_identical);
56401- atomic_add(width*height*2, &dev->bytes_rendered);
56402+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56403+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56404+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56405 end_cycles = get_cycles();
56406- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56407+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56408 >> 10)), /* Kcycles */
56409 &dev->cpu_kcycles_used);
56410
56411@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56412 dlfb_urb_completion(urb);
56413
56414 error:
56415- atomic_add(bytes_sent, &dev->bytes_sent);
56416- atomic_add(bytes_identical, &dev->bytes_identical);
56417- atomic_add(bytes_rendered, &dev->bytes_rendered);
56418+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56419+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56420+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56421 end_cycles = get_cycles();
56422- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56423+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56424 >> 10)), /* Kcycles */
56425 &dev->cpu_kcycles_used);
56426 }
56427@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56428 fb_deferred_io_cleanup(info);
56429 kfree(info->fbdefio);
56430 info->fbdefio = NULL;
56431- info->fbops->fb_mmap = dlfb_ops_mmap;
56432+ pax_open_kernel();
56433+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56434+ pax_close_kernel();
56435 }
56436
56437 pr_warn("released /dev/fb%d user=%d count=%d\n",
56438@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56439 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56440 struct dlfb_data *dev = fb_info->par;
56441 return snprintf(buf, PAGE_SIZE, "%u\n",
56442- atomic_read(&dev->bytes_rendered));
56443+ atomic_read_unchecked(&dev->bytes_rendered));
56444 }
56445
56446 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56447@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56448 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56449 struct dlfb_data *dev = fb_info->par;
56450 return snprintf(buf, PAGE_SIZE, "%u\n",
56451- atomic_read(&dev->bytes_identical));
56452+ atomic_read_unchecked(&dev->bytes_identical));
56453 }
56454
56455 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56456@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56457 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56458 struct dlfb_data *dev = fb_info->par;
56459 return snprintf(buf, PAGE_SIZE, "%u\n",
56460- atomic_read(&dev->bytes_sent));
56461+ atomic_read_unchecked(&dev->bytes_sent));
56462 }
56463
56464 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56465@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56466 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56467 struct dlfb_data *dev = fb_info->par;
56468 return snprintf(buf, PAGE_SIZE, "%u\n",
56469- atomic_read(&dev->cpu_kcycles_used));
56470+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56471 }
56472
56473 static ssize_t edid_show(
56474@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56475 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56476 struct dlfb_data *dev = fb_info->par;
56477
56478- atomic_set(&dev->bytes_rendered, 0);
56479- atomic_set(&dev->bytes_identical, 0);
56480- atomic_set(&dev->bytes_sent, 0);
56481- atomic_set(&dev->cpu_kcycles_used, 0);
56482+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56483+ atomic_set_unchecked(&dev->bytes_identical, 0);
56484+ atomic_set_unchecked(&dev->bytes_sent, 0);
56485+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56486
56487 return count;
56488 }
56489diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56490index d32d1c4..46722e6 100644
56491--- a/drivers/video/fbdev/uvesafb.c
56492+++ b/drivers/video/fbdev/uvesafb.c
56493@@ -19,6 +19,7 @@
56494 #include <linux/io.h>
56495 #include <linux/mutex.h>
56496 #include <linux/slab.h>
56497+#include <linux/moduleloader.h>
56498 #include <video/edid.h>
56499 #include <video/uvesafb.h>
56500 #ifdef CONFIG_X86
56501@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56502 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56503 par->pmi_setpal = par->ypan = 0;
56504 } else {
56505+
56506+#ifdef CONFIG_PAX_KERNEXEC
56507+#ifdef CONFIG_MODULES
56508+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56509+#endif
56510+ if (!par->pmi_code) {
56511+ par->pmi_setpal = par->ypan = 0;
56512+ return 0;
56513+ }
56514+#endif
56515+
56516 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56517 + task->t.regs.edi);
56518+
56519+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56520+ pax_open_kernel();
56521+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56522+ pax_close_kernel();
56523+
56524+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56525+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56526+#else
56527 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56528 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56529+#endif
56530+
56531 printk(KERN_INFO "uvesafb: protected mode interface info at "
56532 "%04x:%04x\n",
56533 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56534@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56535 par->ypan = ypan;
56536
56537 if (par->pmi_setpal || par->ypan) {
56538+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56539 if (__supported_pte_mask & _PAGE_NX) {
56540 par->pmi_setpal = par->ypan = 0;
56541 printk(KERN_WARNING "uvesafb: NX protection is active, "
56542 "better not use the PMI.\n");
56543- } else {
56544+ } else
56545+#endif
56546 uvesafb_vbe_getpmi(task, par);
56547- }
56548 }
56549 #else
56550 /* The protected mode interface is not available on non-x86. */
56551@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56552 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56553
56554 /* Disable blanking if the user requested so. */
56555- if (!blank)
56556- info->fbops->fb_blank = NULL;
56557+ if (!blank) {
56558+ pax_open_kernel();
56559+ *(void **)&info->fbops->fb_blank = NULL;
56560+ pax_close_kernel();
56561+ }
56562
56563 /*
56564 * Find out how much IO memory is required for the mode with
56565@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56566 info->flags = FBINFO_FLAG_DEFAULT |
56567 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56568
56569- if (!par->ypan)
56570- info->fbops->fb_pan_display = NULL;
56571+ if (!par->ypan) {
56572+ pax_open_kernel();
56573+ *(void **)&info->fbops->fb_pan_display = NULL;
56574+ pax_close_kernel();
56575+ }
56576 }
56577
56578 static void uvesafb_init_mtrr(struct fb_info *info)
56579@@ -1786,6 +1816,11 @@ out_mode:
56580 out:
56581 kfree(par->vbe_modes);
56582
56583+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56584+ if (par->pmi_code)
56585+ module_memfree_exec(par->pmi_code);
56586+#endif
56587+
56588 framebuffer_release(info);
56589 return err;
56590 }
56591@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
56592 kfree(par->vbe_state_orig);
56593 kfree(par->vbe_state_saved);
56594
56595+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56596+ if (par->pmi_code)
56597+ module_memfree_exec(par->pmi_code);
56598+#endif
56599+
56600 framebuffer_release(info);
56601 }
56602 return 0;
56603diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56604index d79a0ac..2d0c3d4 100644
56605--- a/drivers/video/fbdev/vesafb.c
56606+++ b/drivers/video/fbdev/vesafb.c
56607@@ -9,6 +9,7 @@
56608 */
56609
56610 #include <linux/module.h>
56611+#include <linux/moduleloader.h>
56612 #include <linux/kernel.h>
56613 #include <linux/errno.h>
56614 #include <linux/string.h>
56615@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56616 static int vram_total; /* Set total amount of memory */
56617 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56618 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56619-static void (*pmi_start)(void) __read_mostly;
56620-static void (*pmi_pal) (void) __read_mostly;
56621+static void (*pmi_start)(void) __read_only;
56622+static void (*pmi_pal) (void) __read_only;
56623 static int depth __read_mostly;
56624 static int vga_compat __read_mostly;
56625 /* --------------------------------------------------------------------- */
56626@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56627 unsigned int size_remap;
56628 unsigned int size_total;
56629 char *option = NULL;
56630+ void *pmi_code = NULL;
56631
56632 /* ignore error return of fb_get_options */
56633 fb_get_options("vesafb", &option);
56634@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56635 size_remap = size_total;
56636 vesafb_fix.smem_len = size_remap;
56637
56638-#ifndef __i386__
56639- screen_info.vesapm_seg = 0;
56640-#endif
56641-
56642 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56643 printk(KERN_WARNING
56644 "vesafb: cannot reserve video memory at 0x%lx\n",
56645@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56646 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56647 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56648
56649+#ifdef __i386__
56650+
56651+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56652+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56653+ if (!pmi_code)
56654+#elif !defined(CONFIG_PAX_KERNEXEC)
56655+ if (0)
56656+#endif
56657+
56658+#endif
56659+ screen_info.vesapm_seg = 0;
56660+
56661 if (screen_info.vesapm_seg) {
56662- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56663- screen_info.vesapm_seg,screen_info.vesapm_off);
56664+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56665+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56666 }
56667
56668 if (screen_info.vesapm_seg < 0xc000)
56669@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56670
56671 if (ypan || pmi_setpal) {
56672 unsigned short *pmi_base;
56673+
56674 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56675- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56676- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56677+
56678+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56679+ pax_open_kernel();
56680+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56681+#else
56682+ pmi_code = pmi_base;
56683+#endif
56684+
56685+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56686+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56687+
56688+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56689+ pmi_start = ktva_ktla(pmi_start);
56690+ pmi_pal = ktva_ktla(pmi_pal);
56691+ pax_close_kernel();
56692+#endif
56693+
56694 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56695 if (pmi_base[3]) {
56696 printk(KERN_INFO "vesafb: pmi: ports = ");
56697@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56698 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56699 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56700
56701- if (!ypan)
56702- info->fbops->fb_pan_display = NULL;
56703+ if (!ypan) {
56704+ pax_open_kernel();
56705+ *(void **)&info->fbops->fb_pan_display = NULL;
56706+ pax_close_kernel();
56707+ }
56708
56709 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56710 err = -ENOMEM;
56711@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56712 fb_info(info, "%s frame buffer device\n", info->fix.id);
56713 return 0;
56714 err:
56715+
56716+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56717+ module_memfree_exec(pmi_code);
56718+#endif
56719+
56720 if (info->screen_base)
56721 iounmap(info->screen_base);
56722 framebuffer_release(info);
56723diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56724index 88714ae..16c2e11 100644
56725--- a/drivers/video/fbdev/via/via_clock.h
56726+++ b/drivers/video/fbdev/via/via_clock.h
56727@@ -56,7 +56,7 @@ struct via_clock {
56728
56729 void (*set_engine_pll_state)(u8 state);
56730 void (*set_engine_pll)(struct via_pll_config config);
56731-};
56732+} __no_const;
56733
56734
56735 static inline u32 get_pll_internal_frequency(u32 ref_freq,
56736diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56737index 3c14e43..2630570 100644
56738--- a/drivers/video/logo/logo_linux_clut224.ppm
56739+++ b/drivers/video/logo/logo_linux_clut224.ppm
56740@@ -2,1603 +2,1123 @@ P3
56741 # Standard 224-color Linux logo
56742 80 80
56743 255
56744- 0 0 0 0 0 0 0 0 0 0 0 0
56745- 0 0 0 0 0 0 0 0 0 0 0 0
56746- 0 0 0 0 0 0 0 0 0 0 0 0
56747- 0 0 0 0 0 0 0 0 0 0 0 0
56748- 0 0 0 0 0 0 0 0 0 0 0 0
56749- 0 0 0 0 0 0 0 0 0 0 0 0
56750- 0 0 0 0 0 0 0 0 0 0 0 0
56751- 0 0 0 0 0 0 0 0 0 0 0 0
56752- 0 0 0 0 0 0 0 0 0 0 0 0
56753- 6 6 6 6 6 6 10 10 10 10 10 10
56754- 10 10 10 6 6 6 6 6 6 6 6 6
56755- 0 0 0 0 0 0 0 0 0 0 0 0
56756- 0 0 0 0 0 0 0 0 0 0 0 0
56757- 0 0 0 0 0 0 0 0 0 0 0 0
56758- 0 0 0 0 0 0 0 0 0 0 0 0
56759- 0 0 0 0 0 0 0 0 0 0 0 0
56760- 0 0 0 0 0 0 0 0 0 0 0 0
56761- 0 0 0 0 0 0 0 0 0 0 0 0
56762- 0 0 0 0 0 0 0 0 0 0 0 0
56763- 0 0 0 0 0 0 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 0 0 0
56765- 0 0 0 0 0 0 0 0 0 0 0 0
56766- 0 0 0 0 0 0 0 0 0 0 0 0
56767- 0 0 0 0 0 0 0 0 0 0 0 0
56768- 0 0 0 0 0 0 0 0 0 0 0 0
56769- 0 0 0 0 0 0 0 0 0 0 0 0
56770- 0 0 0 0 0 0 0 0 0 0 0 0
56771- 0 0 0 0 0 0 0 0 0 0 0 0
56772- 0 0 0 6 6 6 10 10 10 14 14 14
56773- 22 22 22 26 26 26 30 30 30 34 34 34
56774- 30 30 30 30 30 30 26 26 26 18 18 18
56775- 14 14 14 10 10 10 6 6 6 0 0 0
56776- 0 0 0 0 0 0 0 0 0 0 0 0
56777- 0 0 0 0 0 0 0 0 0 0 0 0
56778- 0 0 0 0 0 0 0 0 0 0 0 0
56779- 0 0 0 0 0 0 0 0 0 0 0 0
56780- 0 0 0 0 0 0 0 0 0 0 0 0
56781- 0 0 0 0 0 0 0 0 0 0 0 0
56782- 0 0 0 0 0 0 0 0 0 0 0 0
56783- 0 0 0 0 0 0 0 0 0 0 0 0
56784- 0 0 0 0 0 0 0 0 0 0 0 0
56785- 0 0 0 0 0 1 0 0 1 0 0 0
56786- 0 0 0 0 0 0 0 0 0 0 0 0
56787- 0 0 0 0 0 0 0 0 0 0 0 0
56788- 0 0 0 0 0 0 0 0 0 0 0 0
56789- 0 0 0 0 0 0 0 0 0 0 0 0
56790- 0 0 0 0 0 0 0 0 0 0 0 0
56791- 0 0 0 0 0 0 0 0 0 0 0 0
56792- 6 6 6 14 14 14 26 26 26 42 42 42
56793- 54 54 54 66 66 66 78 78 78 78 78 78
56794- 78 78 78 74 74 74 66 66 66 54 54 54
56795- 42 42 42 26 26 26 18 18 18 10 10 10
56796- 6 6 6 0 0 0 0 0 0 0 0 0
56797- 0 0 0 0 0 0 0 0 0 0 0 0
56798- 0 0 0 0 0 0 0 0 0 0 0 0
56799- 0 0 0 0 0 0 0 0 0 0 0 0
56800- 0 0 0 0 0 0 0 0 0 0 0 0
56801- 0 0 0 0 0 0 0 0 0 0 0 0
56802- 0 0 0 0 0 0 0 0 0 0 0 0
56803- 0 0 0 0 0 0 0 0 0 0 0 0
56804- 0 0 0 0 0 0 0 0 0 0 0 0
56805- 0 0 1 0 0 0 0 0 0 0 0 0
56806- 0 0 0 0 0 0 0 0 0 0 0 0
56807- 0 0 0 0 0 0 0 0 0 0 0 0
56808- 0 0 0 0 0 0 0 0 0 0 0 0
56809- 0 0 0 0 0 0 0 0 0 0 0 0
56810- 0 0 0 0 0 0 0 0 0 0 0 0
56811- 0 0 0 0 0 0 0 0 0 10 10 10
56812- 22 22 22 42 42 42 66 66 66 86 86 86
56813- 66 66 66 38 38 38 38 38 38 22 22 22
56814- 26 26 26 34 34 34 54 54 54 66 66 66
56815- 86 86 86 70 70 70 46 46 46 26 26 26
56816- 14 14 14 6 6 6 0 0 0 0 0 0
56817- 0 0 0 0 0 0 0 0 0 0 0 0
56818- 0 0 0 0 0 0 0 0 0 0 0 0
56819- 0 0 0 0 0 0 0 0 0 0 0 0
56820- 0 0 0 0 0 0 0 0 0 0 0 0
56821- 0 0 0 0 0 0 0 0 0 0 0 0
56822- 0 0 0 0 0 0 0 0 0 0 0 0
56823- 0 0 0 0 0 0 0 0 0 0 0 0
56824- 0 0 0 0 0 0 0 0 0 0 0 0
56825- 0 0 1 0 0 1 0 0 1 0 0 0
56826- 0 0 0 0 0 0 0 0 0 0 0 0
56827- 0 0 0 0 0 0 0 0 0 0 0 0
56828- 0 0 0 0 0 0 0 0 0 0 0 0
56829- 0 0 0 0 0 0 0 0 0 0 0 0
56830- 0 0 0 0 0 0 0 0 0 0 0 0
56831- 0 0 0 0 0 0 10 10 10 26 26 26
56832- 50 50 50 82 82 82 58 58 58 6 6 6
56833- 2 2 6 2 2 6 2 2 6 2 2 6
56834- 2 2 6 2 2 6 2 2 6 2 2 6
56835- 6 6 6 54 54 54 86 86 86 66 66 66
56836- 38 38 38 18 18 18 6 6 6 0 0 0
56837- 0 0 0 0 0 0 0 0 0 0 0 0
56838- 0 0 0 0 0 0 0 0 0 0 0 0
56839- 0 0 0 0 0 0 0 0 0 0 0 0
56840- 0 0 0 0 0 0 0 0 0 0 0 0
56841- 0 0 0 0 0 0 0 0 0 0 0 0
56842- 0 0 0 0 0 0 0 0 0 0 0 0
56843- 0 0 0 0 0 0 0 0 0 0 0 0
56844- 0 0 0 0 0 0 0 0 0 0 0 0
56845- 0 0 0 0 0 0 0 0 0 0 0 0
56846- 0 0 0 0 0 0 0 0 0 0 0 0
56847- 0 0 0 0 0 0 0 0 0 0 0 0
56848- 0 0 0 0 0 0 0 0 0 0 0 0
56849- 0 0 0 0 0 0 0 0 0 0 0 0
56850- 0 0 0 0 0 0 0 0 0 0 0 0
56851- 0 0 0 6 6 6 22 22 22 50 50 50
56852- 78 78 78 34 34 34 2 2 6 2 2 6
56853- 2 2 6 2 2 6 2 2 6 2 2 6
56854- 2 2 6 2 2 6 2 2 6 2 2 6
56855- 2 2 6 2 2 6 6 6 6 70 70 70
56856- 78 78 78 46 46 46 22 22 22 6 6 6
56857- 0 0 0 0 0 0 0 0 0 0 0 0
56858- 0 0 0 0 0 0 0 0 0 0 0 0
56859- 0 0 0 0 0 0 0 0 0 0 0 0
56860- 0 0 0 0 0 0 0 0 0 0 0 0
56861- 0 0 0 0 0 0 0 0 0 0 0 0
56862- 0 0 0 0 0 0 0 0 0 0 0 0
56863- 0 0 0 0 0 0 0 0 0 0 0 0
56864- 0 0 0 0 0 0 0 0 0 0 0 0
56865- 0 0 1 0 0 1 0 0 1 0 0 0
56866- 0 0 0 0 0 0 0 0 0 0 0 0
56867- 0 0 0 0 0 0 0 0 0 0 0 0
56868- 0 0 0 0 0 0 0 0 0 0 0 0
56869- 0 0 0 0 0 0 0 0 0 0 0 0
56870- 0 0 0 0 0 0 0 0 0 0 0 0
56871- 6 6 6 18 18 18 42 42 42 82 82 82
56872- 26 26 26 2 2 6 2 2 6 2 2 6
56873- 2 2 6 2 2 6 2 2 6 2 2 6
56874- 2 2 6 2 2 6 2 2 6 14 14 14
56875- 46 46 46 34 34 34 6 6 6 2 2 6
56876- 42 42 42 78 78 78 42 42 42 18 18 18
56877- 6 6 6 0 0 0 0 0 0 0 0 0
56878- 0 0 0 0 0 0 0 0 0 0 0 0
56879- 0 0 0 0 0 0 0 0 0 0 0 0
56880- 0 0 0 0 0 0 0 0 0 0 0 0
56881- 0 0 0 0 0 0 0 0 0 0 0 0
56882- 0 0 0 0 0 0 0 0 0 0 0 0
56883- 0 0 0 0 0 0 0 0 0 0 0 0
56884- 0 0 0 0 0 0 0 0 0 0 0 0
56885- 0 0 1 0 0 0 0 0 1 0 0 0
56886- 0 0 0 0 0 0 0 0 0 0 0 0
56887- 0 0 0 0 0 0 0 0 0 0 0 0
56888- 0 0 0 0 0 0 0 0 0 0 0 0
56889- 0 0 0 0 0 0 0 0 0 0 0 0
56890- 0 0 0 0 0 0 0 0 0 0 0 0
56891- 10 10 10 30 30 30 66 66 66 58 58 58
56892- 2 2 6 2 2 6 2 2 6 2 2 6
56893- 2 2 6 2 2 6 2 2 6 2 2 6
56894- 2 2 6 2 2 6 2 2 6 26 26 26
56895- 86 86 86 101 101 101 46 46 46 10 10 10
56896- 2 2 6 58 58 58 70 70 70 34 34 34
56897- 10 10 10 0 0 0 0 0 0 0 0 0
56898- 0 0 0 0 0 0 0 0 0 0 0 0
56899- 0 0 0 0 0 0 0 0 0 0 0 0
56900- 0 0 0 0 0 0 0 0 0 0 0 0
56901- 0 0 0 0 0 0 0 0 0 0 0 0
56902- 0 0 0 0 0 0 0 0 0 0 0 0
56903- 0 0 0 0 0 0 0 0 0 0 0 0
56904- 0 0 0 0 0 0 0 0 0 0 0 0
56905- 0 0 1 0 0 1 0 0 1 0 0 0
56906- 0 0 0 0 0 0 0 0 0 0 0 0
56907- 0 0 0 0 0 0 0 0 0 0 0 0
56908- 0 0 0 0 0 0 0 0 0 0 0 0
56909- 0 0 0 0 0 0 0 0 0 0 0 0
56910- 0 0 0 0 0 0 0 0 0 0 0 0
56911- 14 14 14 42 42 42 86 86 86 10 10 10
56912- 2 2 6 2 2 6 2 2 6 2 2 6
56913- 2 2 6 2 2 6 2 2 6 2 2 6
56914- 2 2 6 2 2 6 2 2 6 30 30 30
56915- 94 94 94 94 94 94 58 58 58 26 26 26
56916- 2 2 6 6 6 6 78 78 78 54 54 54
56917- 22 22 22 6 6 6 0 0 0 0 0 0
56918- 0 0 0 0 0 0 0 0 0 0 0 0
56919- 0 0 0 0 0 0 0 0 0 0 0 0
56920- 0 0 0 0 0 0 0 0 0 0 0 0
56921- 0 0 0 0 0 0 0 0 0 0 0 0
56922- 0 0 0 0 0 0 0 0 0 0 0 0
56923- 0 0 0 0 0 0 0 0 0 0 0 0
56924- 0 0 0 0 0 0 0 0 0 0 0 0
56925- 0 0 0 0 0 0 0 0 0 0 0 0
56926- 0 0 0 0 0 0 0 0 0 0 0 0
56927- 0 0 0 0 0 0 0 0 0 0 0 0
56928- 0 0 0 0 0 0 0 0 0 0 0 0
56929- 0 0 0 0 0 0 0 0 0 0 0 0
56930- 0 0 0 0 0 0 0 0 0 6 6 6
56931- 22 22 22 62 62 62 62 62 62 2 2 6
56932- 2 2 6 2 2 6 2 2 6 2 2 6
56933- 2 2 6 2 2 6 2 2 6 2 2 6
56934- 2 2 6 2 2 6 2 2 6 26 26 26
56935- 54 54 54 38 38 38 18 18 18 10 10 10
56936- 2 2 6 2 2 6 34 34 34 82 82 82
56937- 38 38 38 14 14 14 0 0 0 0 0 0
56938- 0 0 0 0 0 0 0 0 0 0 0 0
56939- 0 0 0 0 0 0 0 0 0 0 0 0
56940- 0 0 0 0 0 0 0 0 0 0 0 0
56941- 0 0 0 0 0 0 0 0 0 0 0 0
56942- 0 0 0 0 0 0 0 0 0 0 0 0
56943- 0 0 0 0 0 0 0 0 0 0 0 0
56944- 0 0 0 0 0 0 0 0 0 0 0 0
56945- 0 0 0 0 0 1 0 0 1 0 0 0
56946- 0 0 0 0 0 0 0 0 0 0 0 0
56947- 0 0 0 0 0 0 0 0 0 0 0 0
56948- 0 0 0 0 0 0 0 0 0 0 0 0
56949- 0 0 0 0 0 0 0 0 0 0 0 0
56950- 0 0 0 0 0 0 0 0 0 6 6 6
56951- 30 30 30 78 78 78 30 30 30 2 2 6
56952- 2 2 6 2 2 6 2 2 6 2 2 6
56953- 2 2 6 2 2 6 2 2 6 2 2 6
56954- 2 2 6 2 2 6 2 2 6 10 10 10
56955- 10 10 10 2 2 6 2 2 6 2 2 6
56956- 2 2 6 2 2 6 2 2 6 78 78 78
56957- 50 50 50 18 18 18 6 6 6 0 0 0
56958- 0 0 0 0 0 0 0 0 0 0 0 0
56959- 0 0 0 0 0 0 0 0 0 0 0 0
56960- 0 0 0 0 0 0 0 0 0 0 0 0
56961- 0 0 0 0 0 0 0 0 0 0 0 0
56962- 0 0 0 0 0 0 0 0 0 0 0 0
56963- 0 0 0 0 0 0 0 0 0 0 0 0
56964- 0 0 0 0 0 0 0 0 0 0 0 0
56965- 0 0 1 0 0 0 0 0 0 0 0 0
56966- 0 0 0 0 0 0 0 0 0 0 0 0
56967- 0 0 0 0 0 0 0 0 0 0 0 0
56968- 0 0 0 0 0 0 0 0 0 0 0 0
56969- 0 0 0 0 0 0 0 0 0 0 0 0
56970- 0 0 0 0 0 0 0 0 0 10 10 10
56971- 38 38 38 86 86 86 14 14 14 2 2 6
56972- 2 2 6 2 2 6 2 2 6 2 2 6
56973- 2 2 6 2 2 6 2 2 6 2 2 6
56974- 2 2 6 2 2 6 2 2 6 2 2 6
56975- 2 2 6 2 2 6 2 2 6 2 2 6
56976- 2 2 6 2 2 6 2 2 6 54 54 54
56977- 66 66 66 26 26 26 6 6 6 0 0 0
56978- 0 0 0 0 0 0 0 0 0 0 0 0
56979- 0 0 0 0 0 0 0 0 0 0 0 0
56980- 0 0 0 0 0 0 0 0 0 0 0 0
56981- 0 0 0 0 0 0 0 0 0 0 0 0
56982- 0 0 0 0 0 0 0 0 0 0 0 0
56983- 0 0 0 0 0 0 0 0 0 0 0 0
56984- 0 0 0 0 0 0 0 0 0 0 0 0
56985- 0 0 0 0 0 1 0 0 1 0 0 0
56986- 0 0 0 0 0 0 0 0 0 0 0 0
56987- 0 0 0 0 0 0 0 0 0 0 0 0
56988- 0 0 0 0 0 0 0 0 0 0 0 0
56989- 0 0 0 0 0 0 0 0 0 0 0 0
56990- 0 0 0 0 0 0 0 0 0 14 14 14
56991- 42 42 42 82 82 82 2 2 6 2 2 6
56992- 2 2 6 6 6 6 10 10 10 2 2 6
56993- 2 2 6 2 2 6 2 2 6 2 2 6
56994- 2 2 6 2 2 6 2 2 6 6 6 6
56995- 14 14 14 10 10 10 2 2 6 2 2 6
56996- 2 2 6 2 2 6 2 2 6 18 18 18
56997- 82 82 82 34 34 34 10 10 10 0 0 0
56998- 0 0 0 0 0 0 0 0 0 0 0 0
56999- 0 0 0 0 0 0 0 0 0 0 0 0
57000- 0 0 0 0 0 0 0 0 0 0 0 0
57001- 0 0 0 0 0 0 0 0 0 0 0 0
57002- 0 0 0 0 0 0 0 0 0 0 0 0
57003- 0 0 0 0 0 0 0 0 0 0 0 0
57004- 0 0 0 0 0 0 0 0 0 0 0 0
57005- 0 0 1 0 0 0 0 0 0 0 0 0
57006- 0 0 0 0 0 0 0 0 0 0 0 0
57007- 0 0 0 0 0 0 0 0 0 0 0 0
57008- 0 0 0 0 0 0 0 0 0 0 0 0
57009- 0 0 0 0 0 0 0 0 0 0 0 0
57010- 0 0 0 0 0 0 0 0 0 14 14 14
57011- 46 46 46 86 86 86 2 2 6 2 2 6
57012- 6 6 6 6 6 6 22 22 22 34 34 34
57013- 6 6 6 2 2 6 2 2 6 2 2 6
57014- 2 2 6 2 2 6 18 18 18 34 34 34
57015- 10 10 10 50 50 50 22 22 22 2 2 6
57016- 2 2 6 2 2 6 2 2 6 10 10 10
57017- 86 86 86 42 42 42 14 14 14 0 0 0
57018- 0 0 0 0 0 0 0 0 0 0 0 0
57019- 0 0 0 0 0 0 0 0 0 0 0 0
57020- 0 0 0 0 0 0 0 0 0 0 0 0
57021- 0 0 0 0 0 0 0 0 0 0 0 0
57022- 0 0 0 0 0 0 0 0 0 0 0 0
57023- 0 0 0 0 0 0 0 0 0 0 0 0
57024- 0 0 0 0 0 0 0 0 0 0 0 0
57025- 0 0 1 0 0 1 0 0 1 0 0 0
57026- 0 0 0 0 0 0 0 0 0 0 0 0
57027- 0 0 0 0 0 0 0 0 0 0 0 0
57028- 0 0 0 0 0 0 0 0 0 0 0 0
57029- 0 0 0 0 0 0 0 0 0 0 0 0
57030- 0 0 0 0 0 0 0 0 0 14 14 14
57031- 46 46 46 86 86 86 2 2 6 2 2 6
57032- 38 38 38 116 116 116 94 94 94 22 22 22
57033- 22 22 22 2 2 6 2 2 6 2 2 6
57034- 14 14 14 86 86 86 138 138 138 162 162 162
57035-154 154 154 38 38 38 26 26 26 6 6 6
57036- 2 2 6 2 2 6 2 2 6 2 2 6
57037- 86 86 86 46 46 46 14 14 14 0 0 0
57038- 0 0 0 0 0 0 0 0 0 0 0 0
57039- 0 0 0 0 0 0 0 0 0 0 0 0
57040- 0 0 0 0 0 0 0 0 0 0 0 0
57041- 0 0 0 0 0 0 0 0 0 0 0 0
57042- 0 0 0 0 0 0 0 0 0 0 0 0
57043- 0 0 0 0 0 0 0 0 0 0 0 0
57044- 0 0 0 0 0 0 0 0 0 0 0 0
57045- 0 0 0 0 0 0 0 0 0 0 0 0
57046- 0 0 0 0 0 0 0 0 0 0 0 0
57047- 0 0 0 0 0 0 0 0 0 0 0 0
57048- 0 0 0 0 0 0 0 0 0 0 0 0
57049- 0 0 0 0 0 0 0 0 0 0 0 0
57050- 0 0 0 0 0 0 0 0 0 14 14 14
57051- 46 46 46 86 86 86 2 2 6 14 14 14
57052-134 134 134 198 198 198 195 195 195 116 116 116
57053- 10 10 10 2 2 6 2 2 6 6 6 6
57054-101 98 89 187 187 187 210 210 210 218 218 218
57055-214 214 214 134 134 134 14 14 14 6 6 6
57056- 2 2 6 2 2 6 2 2 6 2 2 6
57057- 86 86 86 50 50 50 18 18 18 6 6 6
57058- 0 0 0 0 0 0 0 0 0 0 0 0
57059- 0 0 0 0 0 0 0 0 0 0 0 0
57060- 0 0 0 0 0 0 0 0 0 0 0 0
57061- 0 0 0 0 0 0 0 0 0 0 0 0
57062- 0 0 0 0 0 0 0 0 0 0 0 0
57063- 0 0 0 0 0 0 0 0 0 0 0 0
57064- 0 0 0 0 0 0 0 0 1 0 0 0
57065- 0 0 1 0 0 1 0 0 1 0 0 0
57066- 0 0 0 0 0 0 0 0 0 0 0 0
57067- 0 0 0 0 0 0 0 0 0 0 0 0
57068- 0 0 0 0 0 0 0 0 0 0 0 0
57069- 0 0 0 0 0 0 0 0 0 0 0 0
57070- 0 0 0 0 0 0 0 0 0 14 14 14
57071- 46 46 46 86 86 86 2 2 6 54 54 54
57072-218 218 218 195 195 195 226 226 226 246 246 246
57073- 58 58 58 2 2 6 2 2 6 30 30 30
57074-210 210 210 253 253 253 174 174 174 123 123 123
57075-221 221 221 234 234 234 74 74 74 2 2 6
57076- 2 2 6 2 2 6 2 2 6 2 2 6
57077- 70 70 70 58 58 58 22 22 22 6 6 6
57078- 0 0 0 0 0 0 0 0 0 0 0 0
57079- 0 0 0 0 0 0 0 0 0 0 0 0
57080- 0 0 0 0 0 0 0 0 0 0 0 0
57081- 0 0 0 0 0 0 0 0 0 0 0 0
57082- 0 0 0 0 0 0 0 0 0 0 0 0
57083- 0 0 0 0 0 0 0 0 0 0 0 0
57084- 0 0 0 0 0 0 0 0 0 0 0 0
57085- 0 0 0 0 0 0 0 0 0 0 0 0
57086- 0 0 0 0 0 0 0 0 0 0 0 0
57087- 0 0 0 0 0 0 0 0 0 0 0 0
57088- 0 0 0 0 0 0 0 0 0 0 0 0
57089- 0 0 0 0 0 0 0 0 0 0 0 0
57090- 0 0 0 0 0 0 0 0 0 14 14 14
57091- 46 46 46 82 82 82 2 2 6 106 106 106
57092-170 170 170 26 26 26 86 86 86 226 226 226
57093-123 123 123 10 10 10 14 14 14 46 46 46
57094-231 231 231 190 190 190 6 6 6 70 70 70
57095- 90 90 90 238 238 238 158 158 158 2 2 6
57096- 2 2 6 2 2 6 2 2 6 2 2 6
57097- 70 70 70 58 58 58 22 22 22 6 6 6
57098- 0 0 0 0 0 0 0 0 0 0 0 0
57099- 0 0 0 0 0 0 0 0 0 0 0 0
57100- 0 0 0 0 0 0 0 0 0 0 0 0
57101- 0 0 0 0 0 0 0 0 0 0 0 0
57102- 0 0 0 0 0 0 0 0 0 0 0 0
57103- 0 0 0 0 0 0 0 0 0 0 0 0
57104- 0 0 0 0 0 0 0 0 1 0 0 0
57105- 0 0 1 0 0 1 0 0 1 0 0 0
57106- 0 0 0 0 0 0 0 0 0 0 0 0
57107- 0 0 0 0 0 0 0 0 0 0 0 0
57108- 0 0 0 0 0 0 0 0 0 0 0 0
57109- 0 0 0 0 0 0 0 0 0 0 0 0
57110- 0 0 0 0 0 0 0 0 0 14 14 14
57111- 42 42 42 86 86 86 6 6 6 116 116 116
57112-106 106 106 6 6 6 70 70 70 149 149 149
57113-128 128 128 18 18 18 38 38 38 54 54 54
57114-221 221 221 106 106 106 2 2 6 14 14 14
57115- 46 46 46 190 190 190 198 198 198 2 2 6
57116- 2 2 6 2 2 6 2 2 6 2 2 6
57117- 74 74 74 62 62 62 22 22 22 6 6 6
57118- 0 0 0 0 0 0 0 0 0 0 0 0
57119- 0 0 0 0 0 0 0 0 0 0 0 0
57120- 0 0 0 0 0 0 0 0 0 0 0 0
57121- 0 0 0 0 0 0 0 0 0 0 0 0
57122- 0 0 0 0 0 0 0 0 0 0 0 0
57123- 0 0 0 0 0 0 0 0 0 0 0 0
57124- 0 0 0 0 0 0 0 0 1 0 0 0
57125- 0 0 1 0 0 0 0 0 1 0 0 0
57126- 0 0 0 0 0 0 0 0 0 0 0 0
57127- 0 0 0 0 0 0 0 0 0 0 0 0
57128- 0 0 0 0 0 0 0 0 0 0 0 0
57129- 0 0 0 0 0 0 0 0 0 0 0 0
57130- 0 0 0 0 0 0 0 0 0 14 14 14
57131- 42 42 42 94 94 94 14 14 14 101 101 101
57132-128 128 128 2 2 6 18 18 18 116 116 116
57133-118 98 46 121 92 8 121 92 8 98 78 10
57134-162 162 162 106 106 106 2 2 6 2 2 6
57135- 2 2 6 195 195 195 195 195 195 6 6 6
57136- 2 2 6 2 2 6 2 2 6 2 2 6
57137- 74 74 74 62 62 62 22 22 22 6 6 6
57138- 0 0 0 0 0 0 0 0 0 0 0 0
57139- 0 0 0 0 0 0 0 0 0 0 0 0
57140- 0 0 0 0 0 0 0 0 0 0 0 0
57141- 0 0 0 0 0 0 0 0 0 0 0 0
57142- 0 0 0 0 0 0 0 0 0 0 0 0
57143- 0 0 0 0 0 0 0 0 0 0 0 0
57144- 0 0 0 0 0 0 0 0 1 0 0 1
57145- 0 0 1 0 0 0 0 0 1 0 0 0
57146- 0 0 0 0 0 0 0 0 0 0 0 0
57147- 0 0 0 0 0 0 0 0 0 0 0 0
57148- 0 0 0 0 0 0 0 0 0 0 0 0
57149- 0 0 0 0 0 0 0 0 0 0 0 0
57150- 0 0 0 0 0 0 0 0 0 10 10 10
57151- 38 38 38 90 90 90 14 14 14 58 58 58
57152-210 210 210 26 26 26 54 38 6 154 114 10
57153-226 170 11 236 186 11 225 175 15 184 144 12
57154-215 174 15 175 146 61 37 26 9 2 2 6
57155- 70 70 70 246 246 246 138 138 138 2 2 6
57156- 2 2 6 2 2 6 2 2 6 2 2 6
57157- 70 70 70 66 66 66 26 26 26 6 6 6
57158- 0 0 0 0 0 0 0 0 0 0 0 0
57159- 0 0 0 0 0 0 0 0 0 0 0 0
57160- 0 0 0 0 0 0 0 0 0 0 0 0
57161- 0 0 0 0 0 0 0 0 0 0 0 0
57162- 0 0 0 0 0 0 0 0 0 0 0 0
57163- 0 0 0 0 0 0 0 0 0 0 0 0
57164- 0 0 0 0 0 0 0 0 0 0 0 0
57165- 0 0 0 0 0 0 0 0 0 0 0 0
57166- 0 0 0 0 0 0 0 0 0 0 0 0
57167- 0 0 0 0 0 0 0 0 0 0 0 0
57168- 0 0 0 0 0 0 0 0 0 0 0 0
57169- 0 0 0 0 0 0 0 0 0 0 0 0
57170- 0 0 0 0 0 0 0 0 0 10 10 10
57171- 38 38 38 86 86 86 14 14 14 10 10 10
57172-195 195 195 188 164 115 192 133 9 225 175 15
57173-239 182 13 234 190 10 232 195 16 232 200 30
57174-245 207 45 241 208 19 232 195 16 184 144 12
57175-218 194 134 211 206 186 42 42 42 2 2 6
57176- 2 2 6 2 2 6 2 2 6 2 2 6
57177- 50 50 50 74 74 74 30 30 30 6 6 6
57178- 0 0 0 0 0 0 0 0 0 0 0 0
57179- 0 0 0 0 0 0 0 0 0 0 0 0
57180- 0 0 0 0 0 0 0 0 0 0 0 0
57181- 0 0 0 0 0 0 0 0 0 0 0 0
57182- 0 0 0 0 0 0 0 0 0 0 0 0
57183- 0 0 0 0 0 0 0 0 0 0 0 0
57184- 0 0 0 0 0 0 0 0 0 0 0 0
57185- 0 0 0 0 0 0 0 0 0 0 0 0
57186- 0 0 0 0 0 0 0 0 0 0 0 0
57187- 0 0 0 0 0 0 0 0 0 0 0 0
57188- 0 0 0 0 0 0 0 0 0 0 0 0
57189- 0 0 0 0 0 0 0 0 0 0 0 0
57190- 0 0 0 0 0 0 0 0 0 10 10 10
57191- 34 34 34 86 86 86 14 14 14 2 2 6
57192-121 87 25 192 133 9 219 162 10 239 182 13
57193-236 186 11 232 195 16 241 208 19 244 214 54
57194-246 218 60 246 218 38 246 215 20 241 208 19
57195-241 208 19 226 184 13 121 87 25 2 2 6
57196- 2 2 6 2 2 6 2 2 6 2 2 6
57197- 50 50 50 82 82 82 34 34 34 10 10 10
57198- 0 0 0 0 0 0 0 0 0 0 0 0
57199- 0 0 0 0 0 0 0 0 0 0 0 0
57200- 0 0 0 0 0 0 0 0 0 0 0 0
57201- 0 0 0 0 0 0 0 0 0 0 0 0
57202- 0 0 0 0 0 0 0 0 0 0 0 0
57203- 0 0 0 0 0 0 0 0 0 0 0 0
57204- 0 0 0 0 0 0 0 0 0 0 0 0
57205- 0 0 0 0 0 0 0 0 0 0 0 0
57206- 0 0 0 0 0 0 0 0 0 0 0 0
57207- 0 0 0 0 0 0 0 0 0 0 0 0
57208- 0 0 0 0 0 0 0 0 0 0 0 0
57209- 0 0 0 0 0 0 0 0 0 0 0 0
57210- 0 0 0 0 0 0 0 0 0 10 10 10
57211- 34 34 34 82 82 82 30 30 30 61 42 6
57212-180 123 7 206 145 10 230 174 11 239 182 13
57213-234 190 10 238 202 15 241 208 19 246 218 74
57214-246 218 38 246 215 20 246 215 20 246 215 20
57215-226 184 13 215 174 15 184 144 12 6 6 6
57216- 2 2 6 2 2 6 2 2 6 2 2 6
57217- 26 26 26 94 94 94 42 42 42 14 14 14
57218- 0 0 0 0 0 0 0 0 0 0 0 0
57219- 0 0 0 0 0 0 0 0 0 0 0 0
57220- 0 0 0 0 0 0 0 0 0 0 0 0
57221- 0 0 0 0 0 0 0 0 0 0 0 0
57222- 0 0 0 0 0 0 0 0 0 0 0 0
57223- 0 0 0 0 0 0 0 0 0 0 0 0
57224- 0 0 0 0 0 0 0 0 0 0 0 0
57225- 0 0 0 0 0 0 0 0 0 0 0 0
57226- 0 0 0 0 0 0 0 0 0 0 0 0
57227- 0 0 0 0 0 0 0 0 0 0 0 0
57228- 0 0 0 0 0 0 0 0 0 0 0 0
57229- 0 0 0 0 0 0 0 0 0 0 0 0
57230- 0 0 0 0 0 0 0 0 0 10 10 10
57231- 30 30 30 78 78 78 50 50 50 104 69 6
57232-192 133 9 216 158 10 236 178 12 236 186 11
57233-232 195 16 241 208 19 244 214 54 245 215 43
57234-246 215 20 246 215 20 241 208 19 198 155 10
57235-200 144 11 216 158 10 156 118 10 2 2 6
57236- 2 2 6 2 2 6 2 2 6 2 2 6
57237- 6 6 6 90 90 90 54 54 54 18 18 18
57238- 6 6 6 0 0 0 0 0 0 0 0 0
57239- 0 0 0 0 0 0 0 0 0 0 0 0
57240- 0 0 0 0 0 0 0 0 0 0 0 0
57241- 0 0 0 0 0 0 0 0 0 0 0 0
57242- 0 0 0 0 0 0 0 0 0 0 0 0
57243- 0 0 0 0 0 0 0 0 0 0 0 0
57244- 0 0 0 0 0 0 0 0 0 0 0 0
57245- 0 0 0 0 0 0 0 0 0 0 0 0
57246- 0 0 0 0 0 0 0 0 0 0 0 0
57247- 0 0 0 0 0 0 0 0 0 0 0 0
57248- 0 0 0 0 0 0 0 0 0 0 0 0
57249- 0 0 0 0 0 0 0 0 0 0 0 0
57250- 0 0 0 0 0 0 0 0 0 10 10 10
57251- 30 30 30 78 78 78 46 46 46 22 22 22
57252-137 92 6 210 162 10 239 182 13 238 190 10
57253-238 202 15 241 208 19 246 215 20 246 215 20
57254-241 208 19 203 166 17 185 133 11 210 150 10
57255-216 158 10 210 150 10 102 78 10 2 2 6
57256- 6 6 6 54 54 54 14 14 14 2 2 6
57257- 2 2 6 62 62 62 74 74 74 30 30 30
57258- 10 10 10 0 0 0 0 0 0 0 0 0
57259- 0 0 0 0 0 0 0 0 0 0 0 0
57260- 0 0 0 0 0 0 0 0 0 0 0 0
57261- 0 0 0 0 0 0 0 0 0 0 0 0
57262- 0 0 0 0 0 0 0 0 0 0 0 0
57263- 0 0 0 0 0 0 0 0 0 0 0 0
57264- 0 0 0 0 0 0 0 0 0 0 0 0
57265- 0 0 0 0 0 0 0 0 0 0 0 0
57266- 0 0 0 0 0 0 0 0 0 0 0 0
57267- 0 0 0 0 0 0 0 0 0 0 0 0
57268- 0 0 0 0 0 0 0 0 0 0 0 0
57269- 0 0 0 0 0 0 0 0 0 0 0 0
57270- 0 0 0 0 0 0 0 0 0 10 10 10
57271- 34 34 34 78 78 78 50 50 50 6 6 6
57272- 94 70 30 139 102 15 190 146 13 226 184 13
57273-232 200 30 232 195 16 215 174 15 190 146 13
57274-168 122 10 192 133 9 210 150 10 213 154 11
57275-202 150 34 182 157 106 101 98 89 2 2 6
57276- 2 2 6 78 78 78 116 116 116 58 58 58
57277- 2 2 6 22 22 22 90 90 90 46 46 46
57278- 18 18 18 6 6 6 0 0 0 0 0 0
57279- 0 0 0 0 0 0 0 0 0 0 0 0
57280- 0 0 0 0 0 0 0 0 0 0 0 0
57281- 0 0 0 0 0 0 0 0 0 0 0 0
57282- 0 0 0 0 0 0 0 0 0 0 0 0
57283- 0 0 0 0 0 0 0 0 0 0 0 0
57284- 0 0 0 0 0 0 0 0 0 0 0 0
57285- 0 0 0 0 0 0 0 0 0 0 0 0
57286- 0 0 0 0 0 0 0 0 0 0 0 0
57287- 0 0 0 0 0 0 0 0 0 0 0 0
57288- 0 0 0 0 0 0 0 0 0 0 0 0
57289- 0 0 0 0 0 0 0 0 0 0 0 0
57290- 0 0 0 0 0 0 0 0 0 10 10 10
57291- 38 38 38 86 86 86 50 50 50 6 6 6
57292-128 128 128 174 154 114 156 107 11 168 122 10
57293-198 155 10 184 144 12 197 138 11 200 144 11
57294-206 145 10 206 145 10 197 138 11 188 164 115
57295-195 195 195 198 198 198 174 174 174 14 14 14
57296- 2 2 6 22 22 22 116 116 116 116 116 116
57297- 22 22 22 2 2 6 74 74 74 70 70 70
57298- 30 30 30 10 10 10 0 0 0 0 0 0
57299- 0 0 0 0 0 0 0 0 0 0 0 0
57300- 0 0 0 0 0 0 0 0 0 0 0 0
57301- 0 0 0 0 0 0 0 0 0 0 0 0
57302- 0 0 0 0 0 0 0 0 0 0 0 0
57303- 0 0 0 0 0 0 0 0 0 0 0 0
57304- 0 0 0 0 0 0 0 0 0 0 0 0
57305- 0 0 0 0 0 0 0 0 0 0 0 0
57306- 0 0 0 0 0 0 0 0 0 0 0 0
57307- 0 0 0 0 0 0 0 0 0 0 0 0
57308- 0 0 0 0 0 0 0 0 0 0 0 0
57309- 0 0 0 0 0 0 0 0 0 0 0 0
57310- 0 0 0 0 0 0 6 6 6 18 18 18
57311- 50 50 50 101 101 101 26 26 26 10 10 10
57312-138 138 138 190 190 190 174 154 114 156 107 11
57313-197 138 11 200 144 11 197 138 11 192 133 9
57314-180 123 7 190 142 34 190 178 144 187 187 187
57315-202 202 202 221 221 221 214 214 214 66 66 66
57316- 2 2 6 2 2 6 50 50 50 62 62 62
57317- 6 6 6 2 2 6 10 10 10 90 90 90
57318- 50 50 50 18 18 18 6 6 6 0 0 0
57319- 0 0 0 0 0 0 0 0 0 0 0 0
57320- 0 0 0 0 0 0 0 0 0 0 0 0
57321- 0 0 0 0 0 0 0 0 0 0 0 0
57322- 0 0 0 0 0 0 0 0 0 0 0 0
57323- 0 0 0 0 0 0 0 0 0 0 0 0
57324- 0 0 0 0 0 0 0 0 0 0 0 0
57325- 0 0 0 0 0 0 0 0 0 0 0 0
57326- 0 0 0 0 0 0 0 0 0 0 0 0
57327- 0 0 0 0 0 0 0 0 0 0 0 0
57328- 0 0 0 0 0 0 0 0 0 0 0 0
57329- 0 0 0 0 0 0 0 0 0 0 0 0
57330- 0 0 0 0 0 0 10 10 10 34 34 34
57331- 74 74 74 74 74 74 2 2 6 6 6 6
57332-144 144 144 198 198 198 190 190 190 178 166 146
57333-154 121 60 156 107 11 156 107 11 168 124 44
57334-174 154 114 187 187 187 190 190 190 210 210 210
57335-246 246 246 253 253 253 253 253 253 182 182 182
57336- 6 6 6 2 2 6 2 2 6 2 2 6
57337- 2 2 6 2 2 6 2 2 6 62 62 62
57338- 74 74 74 34 34 34 14 14 14 0 0 0
57339- 0 0 0 0 0 0 0 0 0 0 0 0
57340- 0 0 0 0 0 0 0 0 0 0 0 0
57341- 0 0 0 0 0 0 0 0 0 0 0 0
57342- 0 0 0 0 0 0 0 0 0 0 0 0
57343- 0 0 0 0 0 0 0 0 0 0 0 0
57344- 0 0 0 0 0 0 0 0 0 0 0 0
57345- 0 0 0 0 0 0 0 0 0 0 0 0
57346- 0 0 0 0 0 0 0 0 0 0 0 0
57347- 0 0 0 0 0 0 0 0 0 0 0 0
57348- 0 0 0 0 0 0 0 0 0 0 0 0
57349- 0 0 0 0 0 0 0 0 0 0 0 0
57350- 0 0 0 10 10 10 22 22 22 54 54 54
57351- 94 94 94 18 18 18 2 2 6 46 46 46
57352-234 234 234 221 221 221 190 190 190 190 190 190
57353-190 190 190 187 187 187 187 187 187 190 190 190
57354-190 190 190 195 195 195 214 214 214 242 242 242
57355-253 253 253 253 253 253 253 253 253 253 253 253
57356- 82 82 82 2 2 6 2 2 6 2 2 6
57357- 2 2 6 2 2 6 2 2 6 14 14 14
57358- 86 86 86 54 54 54 22 22 22 6 6 6
57359- 0 0 0 0 0 0 0 0 0 0 0 0
57360- 0 0 0 0 0 0 0 0 0 0 0 0
57361- 0 0 0 0 0 0 0 0 0 0 0 0
57362- 0 0 0 0 0 0 0 0 0 0 0 0
57363- 0 0 0 0 0 0 0 0 0 0 0 0
57364- 0 0 0 0 0 0 0 0 0 0 0 0
57365- 0 0 0 0 0 0 0 0 0 0 0 0
57366- 0 0 0 0 0 0 0 0 0 0 0 0
57367- 0 0 0 0 0 0 0 0 0 0 0 0
57368- 0 0 0 0 0 0 0 0 0 0 0 0
57369- 0 0 0 0 0 0 0 0 0 0 0 0
57370- 6 6 6 18 18 18 46 46 46 90 90 90
57371- 46 46 46 18 18 18 6 6 6 182 182 182
57372-253 253 253 246 246 246 206 206 206 190 190 190
57373-190 190 190 190 190 190 190 190 190 190 190 190
57374-206 206 206 231 231 231 250 250 250 253 253 253
57375-253 253 253 253 253 253 253 253 253 253 253 253
57376-202 202 202 14 14 14 2 2 6 2 2 6
57377- 2 2 6 2 2 6 2 2 6 2 2 6
57378- 42 42 42 86 86 86 42 42 42 18 18 18
57379- 6 6 6 0 0 0 0 0 0 0 0 0
57380- 0 0 0 0 0 0 0 0 0 0 0 0
57381- 0 0 0 0 0 0 0 0 0 0 0 0
57382- 0 0 0 0 0 0 0 0 0 0 0 0
57383- 0 0 0 0 0 0 0 0 0 0 0 0
57384- 0 0 0 0 0 0 0 0 0 0 0 0
57385- 0 0 0 0 0 0 0 0 0 0 0 0
57386- 0 0 0 0 0 0 0 0 0 0 0 0
57387- 0 0 0 0 0 0 0 0 0 0 0 0
57388- 0 0 0 0 0 0 0 0 0 0 0 0
57389- 0 0 0 0 0 0 0 0 0 6 6 6
57390- 14 14 14 38 38 38 74 74 74 66 66 66
57391- 2 2 6 6 6 6 90 90 90 250 250 250
57392-253 253 253 253 253 253 238 238 238 198 198 198
57393-190 190 190 190 190 190 195 195 195 221 221 221
57394-246 246 246 253 253 253 253 253 253 253 253 253
57395-253 253 253 253 253 253 253 253 253 253 253 253
57396-253 253 253 82 82 82 2 2 6 2 2 6
57397- 2 2 6 2 2 6 2 2 6 2 2 6
57398- 2 2 6 78 78 78 70 70 70 34 34 34
57399- 14 14 14 6 6 6 0 0 0 0 0 0
57400- 0 0 0 0 0 0 0 0 0 0 0 0
57401- 0 0 0 0 0 0 0 0 0 0 0 0
57402- 0 0 0 0 0 0 0 0 0 0 0 0
57403- 0 0 0 0 0 0 0 0 0 0 0 0
57404- 0 0 0 0 0 0 0 0 0 0 0 0
57405- 0 0 0 0 0 0 0 0 0 0 0 0
57406- 0 0 0 0 0 0 0 0 0 0 0 0
57407- 0 0 0 0 0 0 0 0 0 0 0 0
57408- 0 0 0 0 0 0 0 0 0 0 0 0
57409- 0 0 0 0 0 0 0 0 0 14 14 14
57410- 34 34 34 66 66 66 78 78 78 6 6 6
57411- 2 2 6 18 18 18 218 218 218 253 253 253
57412-253 253 253 253 253 253 253 253 253 246 246 246
57413-226 226 226 231 231 231 246 246 246 253 253 253
57414-253 253 253 253 253 253 253 253 253 253 253 253
57415-253 253 253 253 253 253 253 253 253 253 253 253
57416-253 253 253 178 178 178 2 2 6 2 2 6
57417- 2 2 6 2 2 6 2 2 6 2 2 6
57418- 2 2 6 18 18 18 90 90 90 62 62 62
57419- 30 30 30 10 10 10 0 0 0 0 0 0
57420- 0 0 0 0 0 0 0 0 0 0 0 0
57421- 0 0 0 0 0 0 0 0 0 0 0 0
57422- 0 0 0 0 0 0 0 0 0 0 0 0
57423- 0 0 0 0 0 0 0 0 0 0 0 0
57424- 0 0 0 0 0 0 0 0 0 0 0 0
57425- 0 0 0 0 0 0 0 0 0 0 0 0
57426- 0 0 0 0 0 0 0 0 0 0 0 0
57427- 0 0 0 0 0 0 0 0 0 0 0 0
57428- 0 0 0 0 0 0 0 0 0 0 0 0
57429- 0 0 0 0 0 0 10 10 10 26 26 26
57430- 58 58 58 90 90 90 18 18 18 2 2 6
57431- 2 2 6 110 110 110 253 253 253 253 253 253
57432-253 253 253 253 253 253 253 253 253 253 253 253
57433-250 250 250 253 253 253 253 253 253 253 253 253
57434-253 253 253 253 253 253 253 253 253 253 253 253
57435-253 253 253 253 253 253 253 253 253 253 253 253
57436-253 253 253 231 231 231 18 18 18 2 2 6
57437- 2 2 6 2 2 6 2 2 6 2 2 6
57438- 2 2 6 2 2 6 18 18 18 94 94 94
57439- 54 54 54 26 26 26 10 10 10 0 0 0
57440- 0 0 0 0 0 0 0 0 0 0 0 0
57441- 0 0 0 0 0 0 0 0 0 0 0 0
57442- 0 0 0 0 0 0 0 0 0 0 0 0
57443- 0 0 0 0 0 0 0 0 0 0 0 0
57444- 0 0 0 0 0 0 0 0 0 0 0 0
57445- 0 0 0 0 0 0 0 0 0 0 0 0
57446- 0 0 0 0 0 0 0 0 0 0 0 0
57447- 0 0 0 0 0 0 0 0 0 0 0 0
57448- 0 0 0 0 0 0 0 0 0 0 0 0
57449- 0 0 0 6 6 6 22 22 22 50 50 50
57450- 90 90 90 26 26 26 2 2 6 2 2 6
57451- 14 14 14 195 195 195 250 250 250 253 253 253
57452-253 253 253 253 253 253 253 253 253 253 253 253
57453-253 253 253 253 253 253 253 253 253 253 253 253
57454-253 253 253 253 253 253 253 253 253 253 253 253
57455-253 253 253 253 253 253 253 253 253 253 253 253
57456-250 250 250 242 242 242 54 54 54 2 2 6
57457- 2 2 6 2 2 6 2 2 6 2 2 6
57458- 2 2 6 2 2 6 2 2 6 38 38 38
57459- 86 86 86 50 50 50 22 22 22 6 6 6
57460- 0 0 0 0 0 0 0 0 0 0 0 0
57461- 0 0 0 0 0 0 0 0 0 0 0 0
57462- 0 0 0 0 0 0 0 0 0 0 0 0
57463- 0 0 0 0 0 0 0 0 0 0 0 0
57464- 0 0 0 0 0 0 0 0 0 0 0 0
57465- 0 0 0 0 0 0 0 0 0 0 0 0
57466- 0 0 0 0 0 0 0 0 0 0 0 0
57467- 0 0 0 0 0 0 0 0 0 0 0 0
57468- 0 0 0 0 0 0 0 0 0 0 0 0
57469- 6 6 6 14 14 14 38 38 38 82 82 82
57470- 34 34 34 2 2 6 2 2 6 2 2 6
57471- 42 42 42 195 195 195 246 246 246 253 253 253
57472-253 253 253 253 253 253 253 253 253 250 250 250
57473-242 242 242 242 242 242 250 250 250 253 253 253
57474-253 253 253 253 253 253 253 253 253 253 253 253
57475-253 253 253 250 250 250 246 246 246 238 238 238
57476-226 226 226 231 231 231 101 101 101 6 6 6
57477- 2 2 6 2 2 6 2 2 6 2 2 6
57478- 2 2 6 2 2 6 2 2 6 2 2 6
57479- 38 38 38 82 82 82 42 42 42 14 14 14
57480- 6 6 6 0 0 0 0 0 0 0 0 0
57481- 0 0 0 0 0 0 0 0 0 0 0 0
57482- 0 0 0 0 0 0 0 0 0 0 0 0
57483- 0 0 0 0 0 0 0 0 0 0 0 0
57484- 0 0 0 0 0 0 0 0 0 0 0 0
57485- 0 0 0 0 0 0 0 0 0 0 0 0
57486- 0 0 0 0 0 0 0 0 0 0 0 0
57487- 0 0 0 0 0 0 0 0 0 0 0 0
57488- 0 0 0 0 0 0 0 0 0 0 0 0
57489- 10 10 10 26 26 26 62 62 62 66 66 66
57490- 2 2 6 2 2 6 2 2 6 6 6 6
57491- 70 70 70 170 170 170 206 206 206 234 234 234
57492-246 246 246 250 250 250 250 250 250 238 238 238
57493-226 226 226 231 231 231 238 238 238 250 250 250
57494-250 250 250 250 250 250 246 246 246 231 231 231
57495-214 214 214 206 206 206 202 202 202 202 202 202
57496-198 198 198 202 202 202 182 182 182 18 18 18
57497- 2 2 6 2 2 6 2 2 6 2 2 6
57498- 2 2 6 2 2 6 2 2 6 2 2 6
57499- 2 2 6 62 62 62 66 66 66 30 30 30
57500- 10 10 10 0 0 0 0 0 0 0 0 0
57501- 0 0 0 0 0 0 0 0 0 0 0 0
57502- 0 0 0 0 0 0 0 0 0 0 0 0
57503- 0 0 0 0 0 0 0 0 0 0 0 0
57504- 0 0 0 0 0 0 0 0 0 0 0 0
57505- 0 0 0 0 0 0 0 0 0 0 0 0
57506- 0 0 0 0 0 0 0 0 0 0 0 0
57507- 0 0 0 0 0 0 0 0 0 0 0 0
57508- 0 0 0 0 0 0 0 0 0 0 0 0
57509- 14 14 14 42 42 42 82 82 82 18 18 18
57510- 2 2 6 2 2 6 2 2 6 10 10 10
57511- 94 94 94 182 182 182 218 218 218 242 242 242
57512-250 250 250 253 253 253 253 253 253 250 250 250
57513-234 234 234 253 253 253 253 253 253 253 253 253
57514-253 253 253 253 253 253 253 253 253 246 246 246
57515-238 238 238 226 226 226 210 210 210 202 202 202
57516-195 195 195 195 195 195 210 210 210 158 158 158
57517- 6 6 6 14 14 14 50 50 50 14 14 14
57518- 2 2 6 2 2 6 2 2 6 2 2 6
57519- 2 2 6 6 6 6 86 86 86 46 46 46
57520- 18 18 18 6 6 6 0 0 0 0 0 0
57521- 0 0 0 0 0 0 0 0 0 0 0 0
57522- 0 0 0 0 0 0 0 0 0 0 0 0
57523- 0 0 0 0 0 0 0 0 0 0 0 0
57524- 0 0 0 0 0 0 0 0 0 0 0 0
57525- 0 0 0 0 0 0 0 0 0 0 0 0
57526- 0 0 0 0 0 0 0 0 0 0 0 0
57527- 0 0 0 0 0 0 0 0 0 0 0 0
57528- 0 0 0 0 0 0 0 0 0 6 6 6
57529- 22 22 22 54 54 54 70 70 70 2 2 6
57530- 2 2 6 10 10 10 2 2 6 22 22 22
57531-166 166 166 231 231 231 250 250 250 253 253 253
57532-253 253 253 253 253 253 253 253 253 250 250 250
57533-242 242 242 253 253 253 253 253 253 253 253 253
57534-253 253 253 253 253 253 253 253 253 253 253 253
57535-253 253 253 253 253 253 253 253 253 246 246 246
57536-231 231 231 206 206 206 198 198 198 226 226 226
57537- 94 94 94 2 2 6 6 6 6 38 38 38
57538- 30 30 30 2 2 6 2 2 6 2 2 6
57539- 2 2 6 2 2 6 62 62 62 66 66 66
57540- 26 26 26 10 10 10 0 0 0 0 0 0
57541- 0 0 0 0 0 0 0 0 0 0 0 0
57542- 0 0 0 0 0 0 0 0 0 0 0 0
57543- 0 0 0 0 0 0 0 0 0 0 0 0
57544- 0 0 0 0 0 0 0 0 0 0 0 0
57545- 0 0 0 0 0 0 0 0 0 0 0 0
57546- 0 0 0 0 0 0 0 0 0 0 0 0
57547- 0 0 0 0 0 0 0 0 0 0 0 0
57548- 0 0 0 0 0 0 0 0 0 10 10 10
57549- 30 30 30 74 74 74 50 50 50 2 2 6
57550- 26 26 26 26 26 26 2 2 6 106 106 106
57551-238 238 238 253 253 253 253 253 253 253 253 253
57552-253 253 253 253 253 253 253 253 253 253 253 253
57553-253 253 253 253 253 253 253 253 253 253 253 253
57554-253 253 253 253 253 253 253 253 253 253 253 253
57555-253 253 253 253 253 253 253 253 253 253 253 253
57556-253 253 253 246 246 246 218 218 218 202 202 202
57557-210 210 210 14 14 14 2 2 6 2 2 6
57558- 30 30 30 22 22 22 2 2 6 2 2 6
57559- 2 2 6 2 2 6 18 18 18 86 86 86
57560- 42 42 42 14 14 14 0 0 0 0 0 0
57561- 0 0 0 0 0 0 0 0 0 0 0 0
57562- 0 0 0 0 0 0 0 0 0 0 0 0
57563- 0 0 0 0 0 0 0 0 0 0 0 0
57564- 0 0 0 0 0 0 0 0 0 0 0 0
57565- 0 0 0 0 0 0 0 0 0 0 0 0
57566- 0 0 0 0 0 0 0 0 0 0 0 0
57567- 0 0 0 0 0 0 0 0 0 0 0 0
57568- 0 0 0 0 0 0 0 0 0 14 14 14
57569- 42 42 42 90 90 90 22 22 22 2 2 6
57570- 42 42 42 2 2 6 18 18 18 218 218 218
57571-253 253 253 253 253 253 253 253 253 253 253 253
57572-253 253 253 253 253 253 253 253 253 253 253 253
57573-253 253 253 253 253 253 253 253 253 253 253 253
57574-253 253 253 253 253 253 253 253 253 253 253 253
57575-253 253 253 253 253 253 253 253 253 253 253 253
57576-253 253 253 253 253 253 250 250 250 221 221 221
57577-218 218 218 101 101 101 2 2 6 14 14 14
57578- 18 18 18 38 38 38 10 10 10 2 2 6
57579- 2 2 6 2 2 6 2 2 6 78 78 78
57580- 58 58 58 22 22 22 6 6 6 0 0 0
57581- 0 0 0 0 0 0 0 0 0 0 0 0
57582- 0 0 0 0 0 0 0 0 0 0 0 0
57583- 0 0 0 0 0 0 0 0 0 0 0 0
57584- 0 0 0 0 0 0 0 0 0 0 0 0
57585- 0 0 0 0 0 0 0 0 0 0 0 0
57586- 0 0 0 0 0 0 0 0 0 0 0 0
57587- 0 0 0 0 0 0 0 0 0 0 0 0
57588- 0 0 0 0 0 0 6 6 6 18 18 18
57589- 54 54 54 82 82 82 2 2 6 26 26 26
57590- 22 22 22 2 2 6 123 123 123 253 253 253
57591-253 253 253 253 253 253 253 253 253 253 253 253
57592-253 253 253 253 253 253 253 253 253 253 253 253
57593-253 253 253 253 253 253 253 253 253 253 253 253
57594-253 253 253 253 253 253 253 253 253 253 253 253
57595-253 253 253 253 253 253 253 253 253 253 253 253
57596-253 253 253 253 253 253 253 253 253 250 250 250
57597-238 238 238 198 198 198 6 6 6 38 38 38
57598- 58 58 58 26 26 26 38 38 38 2 2 6
57599- 2 2 6 2 2 6 2 2 6 46 46 46
57600- 78 78 78 30 30 30 10 10 10 0 0 0
57601- 0 0 0 0 0 0 0 0 0 0 0 0
57602- 0 0 0 0 0 0 0 0 0 0 0 0
57603- 0 0 0 0 0 0 0 0 0 0 0 0
57604- 0 0 0 0 0 0 0 0 0 0 0 0
57605- 0 0 0 0 0 0 0 0 0 0 0 0
57606- 0 0 0 0 0 0 0 0 0 0 0 0
57607- 0 0 0 0 0 0 0 0 0 0 0 0
57608- 0 0 0 0 0 0 10 10 10 30 30 30
57609- 74 74 74 58 58 58 2 2 6 42 42 42
57610- 2 2 6 22 22 22 231 231 231 253 253 253
57611-253 253 253 253 253 253 253 253 253 253 253 253
57612-253 253 253 253 253 253 253 253 253 250 250 250
57613-253 253 253 253 253 253 253 253 253 253 253 253
57614-253 253 253 253 253 253 253 253 253 253 253 253
57615-253 253 253 253 253 253 253 253 253 253 253 253
57616-253 253 253 253 253 253 253 253 253 253 253 253
57617-253 253 253 246 246 246 46 46 46 38 38 38
57618- 42 42 42 14 14 14 38 38 38 14 14 14
57619- 2 2 6 2 2 6 2 2 6 6 6 6
57620- 86 86 86 46 46 46 14 14 14 0 0 0
57621- 0 0 0 0 0 0 0 0 0 0 0 0
57622- 0 0 0 0 0 0 0 0 0 0 0 0
57623- 0 0 0 0 0 0 0 0 0 0 0 0
57624- 0 0 0 0 0 0 0 0 0 0 0 0
57625- 0 0 0 0 0 0 0 0 0 0 0 0
57626- 0 0 0 0 0 0 0 0 0 0 0 0
57627- 0 0 0 0 0 0 0 0 0 0 0 0
57628- 0 0 0 6 6 6 14 14 14 42 42 42
57629- 90 90 90 18 18 18 18 18 18 26 26 26
57630- 2 2 6 116 116 116 253 253 253 253 253 253
57631-253 253 253 253 253 253 253 253 253 253 253 253
57632-253 253 253 253 253 253 250 250 250 238 238 238
57633-253 253 253 253 253 253 253 253 253 253 253 253
57634-253 253 253 253 253 253 253 253 253 253 253 253
57635-253 253 253 253 253 253 253 253 253 253 253 253
57636-253 253 253 253 253 253 253 253 253 253 253 253
57637-253 253 253 253 253 253 94 94 94 6 6 6
57638- 2 2 6 2 2 6 10 10 10 34 34 34
57639- 2 2 6 2 2 6 2 2 6 2 2 6
57640- 74 74 74 58 58 58 22 22 22 6 6 6
57641- 0 0 0 0 0 0 0 0 0 0 0 0
57642- 0 0 0 0 0 0 0 0 0 0 0 0
57643- 0 0 0 0 0 0 0 0 0 0 0 0
57644- 0 0 0 0 0 0 0 0 0 0 0 0
57645- 0 0 0 0 0 0 0 0 0 0 0 0
57646- 0 0 0 0 0 0 0 0 0 0 0 0
57647- 0 0 0 0 0 0 0 0 0 0 0 0
57648- 0 0 0 10 10 10 26 26 26 66 66 66
57649- 82 82 82 2 2 6 38 38 38 6 6 6
57650- 14 14 14 210 210 210 253 253 253 253 253 253
57651-253 253 253 253 253 253 253 253 253 253 253 253
57652-253 253 253 253 253 253 246 246 246 242 242 242
57653-253 253 253 253 253 253 253 253 253 253 253 253
57654-253 253 253 253 253 253 253 253 253 253 253 253
57655-253 253 253 253 253 253 253 253 253 253 253 253
57656-253 253 253 253 253 253 253 253 253 253 253 253
57657-253 253 253 253 253 253 144 144 144 2 2 6
57658- 2 2 6 2 2 6 2 2 6 46 46 46
57659- 2 2 6 2 2 6 2 2 6 2 2 6
57660- 42 42 42 74 74 74 30 30 30 10 10 10
57661- 0 0 0 0 0 0 0 0 0 0 0 0
57662- 0 0 0 0 0 0 0 0 0 0 0 0
57663- 0 0 0 0 0 0 0 0 0 0 0 0
57664- 0 0 0 0 0 0 0 0 0 0 0 0
57665- 0 0 0 0 0 0 0 0 0 0 0 0
57666- 0 0 0 0 0 0 0 0 0 0 0 0
57667- 0 0 0 0 0 0 0 0 0 0 0 0
57668- 6 6 6 14 14 14 42 42 42 90 90 90
57669- 26 26 26 6 6 6 42 42 42 2 2 6
57670- 74 74 74 250 250 250 253 253 253 253 253 253
57671-253 253 253 253 253 253 253 253 253 253 253 253
57672-253 253 253 253 253 253 242 242 242 242 242 242
57673-253 253 253 253 253 253 253 253 253 253 253 253
57674-253 253 253 253 253 253 253 253 253 253 253 253
57675-253 253 253 253 253 253 253 253 253 253 253 253
57676-253 253 253 253 253 253 253 253 253 253 253 253
57677-253 253 253 253 253 253 182 182 182 2 2 6
57678- 2 2 6 2 2 6 2 2 6 46 46 46
57679- 2 2 6 2 2 6 2 2 6 2 2 6
57680- 10 10 10 86 86 86 38 38 38 10 10 10
57681- 0 0 0 0 0 0 0 0 0 0 0 0
57682- 0 0 0 0 0 0 0 0 0 0 0 0
57683- 0 0 0 0 0 0 0 0 0 0 0 0
57684- 0 0 0 0 0 0 0 0 0 0 0 0
57685- 0 0 0 0 0 0 0 0 0 0 0 0
57686- 0 0 0 0 0 0 0 0 0 0 0 0
57687- 0 0 0 0 0 0 0 0 0 0 0 0
57688- 10 10 10 26 26 26 66 66 66 82 82 82
57689- 2 2 6 22 22 22 18 18 18 2 2 6
57690-149 149 149 253 253 253 253 253 253 253 253 253
57691-253 253 253 253 253 253 253 253 253 253 253 253
57692-253 253 253 253 253 253 234 234 234 242 242 242
57693-253 253 253 253 253 253 253 253 253 253 253 253
57694-253 253 253 253 253 253 253 253 253 253 253 253
57695-253 253 253 253 253 253 253 253 253 253 253 253
57696-253 253 253 253 253 253 253 253 253 253 253 253
57697-253 253 253 253 253 253 206 206 206 2 2 6
57698- 2 2 6 2 2 6 2 2 6 38 38 38
57699- 2 2 6 2 2 6 2 2 6 2 2 6
57700- 6 6 6 86 86 86 46 46 46 14 14 14
57701- 0 0 0 0 0 0 0 0 0 0 0 0
57702- 0 0 0 0 0 0 0 0 0 0 0 0
57703- 0 0 0 0 0 0 0 0 0 0 0 0
57704- 0 0 0 0 0 0 0 0 0 0 0 0
57705- 0 0 0 0 0 0 0 0 0 0 0 0
57706- 0 0 0 0 0 0 0 0 0 0 0 0
57707- 0 0 0 0 0 0 0 0 0 6 6 6
57708- 18 18 18 46 46 46 86 86 86 18 18 18
57709- 2 2 6 34 34 34 10 10 10 6 6 6
57710-210 210 210 253 253 253 253 253 253 253 253 253
57711-253 253 253 253 253 253 253 253 253 253 253 253
57712-253 253 253 253 253 253 234 234 234 242 242 242
57713-253 253 253 253 253 253 253 253 253 253 253 253
57714-253 253 253 253 253 253 253 253 253 253 253 253
57715-253 253 253 253 253 253 253 253 253 253 253 253
57716-253 253 253 253 253 253 253 253 253 253 253 253
57717-253 253 253 253 253 253 221 221 221 6 6 6
57718- 2 2 6 2 2 6 6 6 6 30 30 30
57719- 2 2 6 2 2 6 2 2 6 2 2 6
57720- 2 2 6 82 82 82 54 54 54 18 18 18
57721- 6 6 6 0 0 0 0 0 0 0 0 0
57722- 0 0 0 0 0 0 0 0 0 0 0 0
57723- 0 0 0 0 0 0 0 0 0 0 0 0
57724- 0 0 0 0 0 0 0 0 0 0 0 0
57725- 0 0 0 0 0 0 0 0 0 0 0 0
57726- 0 0 0 0 0 0 0 0 0 0 0 0
57727- 0 0 0 0 0 0 0 0 0 10 10 10
57728- 26 26 26 66 66 66 62 62 62 2 2 6
57729- 2 2 6 38 38 38 10 10 10 26 26 26
57730-238 238 238 253 253 253 253 253 253 253 253 253
57731-253 253 253 253 253 253 253 253 253 253 253 253
57732-253 253 253 253 253 253 231 231 231 238 238 238
57733-253 253 253 253 253 253 253 253 253 253 253 253
57734-253 253 253 253 253 253 253 253 253 253 253 253
57735-253 253 253 253 253 253 253 253 253 253 253 253
57736-253 253 253 253 253 253 253 253 253 253 253 253
57737-253 253 253 253 253 253 231 231 231 6 6 6
57738- 2 2 6 2 2 6 10 10 10 30 30 30
57739- 2 2 6 2 2 6 2 2 6 2 2 6
57740- 2 2 6 66 66 66 58 58 58 22 22 22
57741- 6 6 6 0 0 0 0 0 0 0 0 0
57742- 0 0 0 0 0 0 0 0 0 0 0 0
57743- 0 0 0 0 0 0 0 0 0 0 0 0
57744- 0 0 0 0 0 0 0 0 0 0 0 0
57745- 0 0 0 0 0 0 0 0 0 0 0 0
57746- 0 0 0 0 0 0 0 0 0 0 0 0
57747- 0 0 0 0 0 0 0 0 0 10 10 10
57748- 38 38 38 78 78 78 6 6 6 2 2 6
57749- 2 2 6 46 46 46 14 14 14 42 42 42
57750-246 246 246 253 253 253 253 253 253 253 253 253
57751-253 253 253 253 253 253 253 253 253 253 253 253
57752-253 253 253 253 253 253 231 231 231 242 242 242
57753-253 253 253 253 253 253 253 253 253 253 253 253
57754-253 253 253 253 253 253 253 253 253 253 253 253
57755-253 253 253 253 253 253 253 253 253 253 253 253
57756-253 253 253 253 253 253 253 253 253 253 253 253
57757-253 253 253 253 253 253 234 234 234 10 10 10
57758- 2 2 6 2 2 6 22 22 22 14 14 14
57759- 2 2 6 2 2 6 2 2 6 2 2 6
57760- 2 2 6 66 66 66 62 62 62 22 22 22
57761- 6 6 6 0 0 0 0 0 0 0 0 0
57762- 0 0 0 0 0 0 0 0 0 0 0 0
57763- 0 0 0 0 0 0 0 0 0 0 0 0
57764- 0 0 0 0 0 0 0 0 0 0 0 0
57765- 0 0 0 0 0 0 0 0 0 0 0 0
57766- 0 0 0 0 0 0 0 0 0 0 0 0
57767- 0 0 0 0 0 0 6 6 6 18 18 18
57768- 50 50 50 74 74 74 2 2 6 2 2 6
57769- 14 14 14 70 70 70 34 34 34 62 62 62
57770-250 250 250 253 253 253 253 253 253 253 253 253
57771-253 253 253 253 253 253 253 253 253 253 253 253
57772-253 253 253 253 253 253 231 231 231 246 246 246
57773-253 253 253 253 253 253 253 253 253 253 253 253
57774-253 253 253 253 253 253 253 253 253 253 253 253
57775-253 253 253 253 253 253 253 253 253 253 253 253
57776-253 253 253 253 253 253 253 253 253 253 253 253
57777-253 253 253 253 253 253 234 234 234 14 14 14
57778- 2 2 6 2 2 6 30 30 30 2 2 6
57779- 2 2 6 2 2 6 2 2 6 2 2 6
57780- 2 2 6 66 66 66 62 62 62 22 22 22
57781- 6 6 6 0 0 0 0 0 0 0 0 0
57782- 0 0 0 0 0 0 0 0 0 0 0 0
57783- 0 0 0 0 0 0 0 0 0 0 0 0
57784- 0 0 0 0 0 0 0 0 0 0 0 0
57785- 0 0 0 0 0 0 0 0 0 0 0 0
57786- 0 0 0 0 0 0 0 0 0 0 0 0
57787- 0 0 0 0 0 0 6 6 6 18 18 18
57788- 54 54 54 62 62 62 2 2 6 2 2 6
57789- 2 2 6 30 30 30 46 46 46 70 70 70
57790-250 250 250 253 253 253 253 253 253 253 253 253
57791-253 253 253 253 253 253 253 253 253 253 253 253
57792-253 253 253 253 253 253 231 231 231 246 246 246
57793-253 253 253 253 253 253 253 253 253 253 253 253
57794-253 253 253 253 253 253 253 253 253 253 253 253
57795-253 253 253 253 253 253 253 253 253 253 253 253
57796-253 253 253 253 253 253 253 253 253 253 253 253
57797-253 253 253 253 253 253 226 226 226 10 10 10
57798- 2 2 6 6 6 6 30 30 30 2 2 6
57799- 2 2 6 2 2 6 2 2 6 2 2 6
57800- 2 2 6 66 66 66 58 58 58 22 22 22
57801- 6 6 6 0 0 0 0 0 0 0 0 0
57802- 0 0 0 0 0 0 0 0 0 0 0 0
57803- 0 0 0 0 0 0 0 0 0 0 0 0
57804- 0 0 0 0 0 0 0 0 0 0 0 0
57805- 0 0 0 0 0 0 0 0 0 0 0 0
57806- 0 0 0 0 0 0 0 0 0 0 0 0
57807- 0 0 0 0 0 0 6 6 6 22 22 22
57808- 58 58 58 62 62 62 2 2 6 2 2 6
57809- 2 2 6 2 2 6 30 30 30 78 78 78
57810-250 250 250 253 253 253 253 253 253 253 253 253
57811-253 253 253 253 253 253 253 253 253 253 253 253
57812-253 253 253 253 253 253 231 231 231 246 246 246
57813-253 253 253 253 253 253 253 253 253 253 253 253
57814-253 253 253 253 253 253 253 253 253 253 253 253
57815-253 253 253 253 253 253 253 253 253 253 253 253
57816-253 253 253 253 253 253 253 253 253 253 253 253
57817-253 253 253 253 253 253 206 206 206 2 2 6
57818- 22 22 22 34 34 34 18 14 6 22 22 22
57819- 26 26 26 18 18 18 6 6 6 2 2 6
57820- 2 2 6 82 82 82 54 54 54 18 18 18
57821- 6 6 6 0 0 0 0 0 0 0 0 0
57822- 0 0 0 0 0 0 0 0 0 0 0 0
57823- 0 0 0 0 0 0 0 0 0 0 0 0
57824- 0 0 0 0 0 0 0 0 0 0 0 0
57825- 0 0 0 0 0 0 0 0 0 0 0 0
57826- 0 0 0 0 0 0 0 0 0 0 0 0
57827- 0 0 0 0 0 0 6 6 6 26 26 26
57828- 62 62 62 106 106 106 74 54 14 185 133 11
57829-210 162 10 121 92 8 6 6 6 62 62 62
57830-238 238 238 253 253 253 253 253 253 253 253 253
57831-253 253 253 253 253 253 253 253 253 253 253 253
57832-253 253 253 253 253 253 231 231 231 246 246 246
57833-253 253 253 253 253 253 253 253 253 253 253 253
57834-253 253 253 253 253 253 253 253 253 253 253 253
57835-253 253 253 253 253 253 253 253 253 253 253 253
57836-253 253 253 253 253 253 253 253 253 253 253 253
57837-253 253 253 253 253 253 158 158 158 18 18 18
57838- 14 14 14 2 2 6 2 2 6 2 2 6
57839- 6 6 6 18 18 18 66 66 66 38 38 38
57840- 6 6 6 94 94 94 50 50 50 18 18 18
57841- 6 6 6 0 0 0 0 0 0 0 0 0
57842- 0 0 0 0 0 0 0 0 0 0 0 0
57843- 0 0 0 0 0 0 0 0 0 0 0 0
57844- 0 0 0 0 0 0 0 0 0 0 0 0
57845- 0 0 0 0 0 0 0 0 0 0 0 0
57846- 0 0 0 0 0 0 0 0 0 6 6 6
57847- 10 10 10 10 10 10 18 18 18 38 38 38
57848- 78 78 78 142 134 106 216 158 10 242 186 14
57849-246 190 14 246 190 14 156 118 10 10 10 10
57850- 90 90 90 238 238 238 253 253 253 253 253 253
57851-253 253 253 253 253 253 253 253 253 253 253 253
57852-253 253 253 253 253 253 231 231 231 250 250 250
57853-253 253 253 253 253 253 253 253 253 253 253 253
57854-253 253 253 253 253 253 253 253 253 253 253 253
57855-253 253 253 253 253 253 253 253 253 253 253 253
57856-253 253 253 253 253 253 253 253 253 246 230 190
57857-238 204 91 238 204 91 181 142 44 37 26 9
57858- 2 2 6 2 2 6 2 2 6 2 2 6
57859- 2 2 6 2 2 6 38 38 38 46 46 46
57860- 26 26 26 106 106 106 54 54 54 18 18 18
57861- 6 6 6 0 0 0 0 0 0 0 0 0
57862- 0 0 0 0 0 0 0 0 0 0 0 0
57863- 0 0 0 0 0 0 0 0 0 0 0 0
57864- 0 0 0 0 0 0 0 0 0 0 0 0
57865- 0 0 0 0 0 0 0 0 0 0 0 0
57866- 0 0 0 6 6 6 14 14 14 22 22 22
57867- 30 30 30 38 38 38 50 50 50 70 70 70
57868-106 106 106 190 142 34 226 170 11 242 186 14
57869-246 190 14 246 190 14 246 190 14 154 114 10
57870- 6 6 6 74 74 74 226 226 226 253 253 253
57871-253 253 253 253 253 253 253 253 253 253 253 253
57872-253 253 253 253 253 253 231 231 231 250 250 250
57873-253 253 253 253 253 253 253 253 253 253 253 253
57874-253 253 253 253 253 253 253 253 253 253 253 253
57875-253 253 253 253 253 253 253 253 253 253 253 253
57876-253 253 253 253 253 253 253 253 253 228 184 62
57877-241 196 14 241 208 19 232 195 16 38 30 10
57878- 2 2 6 2 2 6 2 2 6 2 2 6
57879- 2 2 6 6 6 6 30 30 30 26 26 26
57880-203 166 17 154 142 90 66 66 66 26 26 26
57881- 6 6 6 0 0 0 0 0 0 0 0 0
57882- 0 0 0 0 0 0 0 0 0 0 0 0
57883- 0 0 0 0 0 0 0 0 0 0 0 0
57884- 0 0 0 0 0 0 0 0 0 0 0 0
57885- 0 0 0 0 0 0 0 0 0 0 0 0
57886- 6 6 6 18 18 18 38 38 38 58 58 58
57887- 78 78 78 86 86 86 101 101 101 123 123 123
57888-175 146 61 210 150 10 234 174 13 246 186 14
57889-246 190 14 246 190 14 246 190 14 238 190 10
57890-102 78 10 2 2 6 46 46 46 198 198 198
57891-253 253 253 253 253 253 253 253 253 253 253 253
57892-253 253 253 253 253 253 234 234 234 242 242 242
57893-253 253 253 253 253 253 253 253 253 253 253 253
57894-253 253 253 253 253 253 253 253 253 253 253 253
57895-253 253 253 253 253 253 253 253 253 253 253 253
57896-253 253 253 253 253 253 253 253 253 224 178 62
57897-242 186 14 241 196 14 210 166 10 22 18 6
57898- 2 2 6 2 2 6 2 2 6 2 2 6
57899- 2 2 6 2 2 6 6 6 6 121 92 8
57900-238 202 15 232 195 16 82 82 82 34 34 34
57901- 10 10 10 0 0 0 0 0 0 0 0 0
57902- 0 0 0 0 0 0 0 0 0 0 0 0
57903- 0 0 0 0 0 0 0 0 0 0 0 0
57904- 0 0 0 0 0 0 0 0 0 0 0 0
57905- 0 0 0 0 0 0 0 0 0 0 0 0
57906- 14 14 14 38 38 38 70 70 70 154 122 46
57907-190 142 34 200 144 11 197 138 11 197 138 11
57908-213 154 11 226 170 11 242 186 14 246 190 14
57909-246 190 14 246 190 14 246 190 14 246 190 14
57910-225 175 15 46 32 6 2 2 6 22 22 22
57911-158 158 158 250 250 250 253 253 253 253 253 253
57912-253 253 253 253 253 253 253 253 253 253 253 253
57913-253 253 253 253 253 253 253 253 253 253 253 253
57914-253 253 253 253 253 253 253 253 253 253 253 253
57915-253 253 253 253 253 253 253 253 253 253 253 253
57916-253 253 253 250 250 250 242 242 242 224 178 62
57917-239 182 13 236 186 11 213 154 11 46 32 6
57918- 2 2 6 2 2 6 2 2 6 2 2 6
57919- 2 2 6 2 2 6 61 42 6 225 175 15
57920-238 190 10 236 186 11 112 100 78 42 42 42
57921- 14 14 14 0 0 0 0 0 0 0 0 0
57922- 0 0 0 0 0 0 0 0 0 0 0 0
57923- 0 0 0 0 0 0 0 0 0 0 0 0
57924- 0 0 0 0 0 0 0 0 0 0 0 0
57925- 0 0 0 0 0 0 0 0 0 6 6 6
57926- 22 22 22 54 54 54 154 122 46 213 154 11
57927-226 170 11 230 174 11 226 170 11 226 170 11
57928-236 178 12 242 186 14 246 190 14 246 190 14
57929-246 190 14 246 190 14 246 190 14 246 190 14
57930-241 196 14 184 144 12 10 10 10 2 2 6
57931- 6 6 6 116 116 116 242 242 242 253 253 253
57932-253 253 253 253 253 253 253 253 253 253 253 253
57933-253 253 253 253 253 253 253 253 253 253 253 253
57934-253 253 253 253 253 253 253 253 253 253 253 253
57935-253 253 253 253 253 253 253 253 253 253 253 253
57936-253 253 253 231 231 231 198 198 198 214 170 54
57937-236 178 12 236 178 12 210 150 10 137 92 6
57938- 18 14 6 2 2 6 2 2 6 2 2 6
57939- 6 6 6 70 47 6 200 144 11 236 178 12
57940-239 182 13 239 182 13 124 112 88 58 58 58
57941- 22 22 22 6 6 6 0 0 0 0 0 0
57942- 0 0 0 0 0 0 0 0 0 0 0 0
57943- 0 0 0 0 0 0 0 0 0 0 0 0
57944- 0 0 0 0 0 0 0 0 0 0 0 0
57945- 0 0 0 0 0 0 0 0 0 10 10 10
57946- 30 30 30 70 70 70 180 133 36 226 170 11
57947-239 182 13 242 186 14 242 186 14 246 186 14
57948-246 190 14 246 190 14 246 190 14 246 190 14
57949-246 190 14 246 190 14 246 190 14 246 190 14
57950-246 190 14 232 195 16 98 70 6 2 2 6
57951- 2 2 6 2 2 6 66 66 66 221 221 221
57952-253 253 253 253 253 253 253 253 253 253 253 253
57953-253 253 253 253 253 253 253 253 253 253 253 253
57954-253 253 253 253 253 253 253 253 253 253 253 253
57955-253 253 253 253 253 253 253 253 253 253 253 253
57956-253 253 253 206 206 206 198 198 198 214 166 58
57957-230 174 11 230 174 11 216 158 10 192 133 9
57958-163 110 8 116 81 8 102 78 10 116 81 8
57959-167 114 7 197 138 11 226 170 11 239 182 13
57960-242 186 14 242 186 14 162 146 94 78 78 78
57961- 34 34 34 14 14 14 6 6 6 0 0 0
57962- 0 0 0 0 0 0 0 0 0 0 0 0
57963- 0 0 0 0 0 0 0 0 0 0 0 0
57964- 0 0 0 0 0 0 0 0 0 0 0 0
57965- 0 0 0 0 0 0 0 0 0 6 6 6
57966- 30 30 30 78 78 78 190 142 34 226 170 11
57967-239 182 13 246 190 14 246 190 14 246 190 14
57968-246 190 14 246 190 14 246 190 14 246 190 14
57969-246 190 14 246 190 14 246 190 14 246 190 14
57970-246 190 14 241 196 14 203 166 17 22 18 6
57971- 2 2 6 2 2 6 2 2 6 38 38 38
57972-218 218 218 253 253 253 253 253 253 253 253 253
57973-253 253 253 253 253 253 253 253 253 253 253 253
57974-253 253 253 253 253 253 253 253 253 253 253 253
57975-253 253 253 253 253 253 253 253 253 253 253 253
57976-250 250 250 206 206 206 198 198 198 202 162 69
57977-226 170 11 236 178 12 224 166 10 210 150 10
57978-200 144 11 197 138 11 192 133 9 197 138 11
57979-210 150 10 226 170 11 242 186 14 246 190 14
57980-246 190 14 246 186 14 225 175 15 124 112 88
57981- 62 62 62 30 30 30 14 14 14 6 6 6
57982- 0 0 0 0 0 0 0 0 0 0 0 0
57983- 0 0 0 0 0 0 0 0 0 0 0 0
57984- 0 0 0 0 0 0 0 0 0 0 0 0
57985- 0 0 0 0 0 0 0 0 0 10 10 10
57986- 30 30 30 78 78 78 174 135 50 224 166 10
57987-239 182 13 246 190 14 246 190 14 246 190 14
57988-246 190 14 246 190 14 246 190 14 246 190 14
57989-246 190 14 246 190 14 246 190 14 246 190 14
57990-246 190 14 246 190 14 241 196 14 139 102 15
57991- 2 2 6 2 2 6 2 2 6 2 2 6
57992- 78 78 78 250 250 250 253 253 253 253 253 253
57993-253 253 253 253 253 253 253 253 253 253 253 253
57994-253 253 253 253 253 253 253 253 253 253 253 253
57995-253 253 253 253 253 253 253 253 253 253 253 253
57996-250 250 250 214 214 214 198 198 198 190 150 46
57997-219 162 10 236 178 12 234 174 13 224 166 10
57998-216 158 10 213 154 11 213 154 11 216 158 10
57999-226 170 11 239 182 13 246 190 14 246 190 14
58000-246 190 14 246 190 14 242 186 14 206 162 42
58001-101 101 101 58 58 58 30 30 30 14 14 14
58002- 6 6 6 0 0 0 0 0 0 0 0 0
58003- 0 0 0 0 0 0 0 0 0 0 0 0
58004- 0 0 0 0 0 0 0 0 0 0 0 0
58005- 0 0 0 0 0 0 0 0 0 10 10 10
58006- 30 30 30 74 74 74 174 135 50 216 158 10
58007-236 178 12 246 190 14 246 190 14 246 190 14
58008-246 190 14 246 190 14 246 190 14 246 190 14
58009-246 190 14 246 190 14 246 190 14 246 190 14
58010-246 190 14 246 190 14 241 196 14 226 184 13
58011- 61 42 6 2 2 6 2 2 6 2 2 6
58012- 22 22 22 238 238 238 253 253 253 253 253 253
58013-253 253 253 253 253 253 253 253 253 253 253 253
58014-253 253 253 253 253 253 253 253 253 253 253 253
58015-253 253 253 253 253 253 253 253 253 253 253 253
58016-253 253 253 226 226 226 187 187 187 180 133 36
58017-216 158 10 236 178 12 239 182 13 236 178 12
58018-230 174 11 226 170 11 226 170 11 230 174 11
58019-236 178 12 242 186 14 246 190 14 246 190 14
58020-246 190 14 246 190 14 246 186 14 239 182 13
58021-206 162 42 106 106 106 66 66 66 34 34 34
58022- 14 14 14 6 6 6 0 0 0 0 0 0
58023- 0 0 0 0 0 0 0 0 0 0 0 0
58024- 0 0 0 0 0 0 0 0 0 0 0 0
58025- 0 0 0 0 0 0 0 0 0 6 6 6
58026- 26 26 26 70 70 70 163 133 67 213 154 11
58027-236 178 12 246 190 14 246 190 14 246 190 14
58028-246 190 14 246 190 14 246 190 14 246 190 14
58029-246 190 14 246 190 14 246 190 14 246 190 14
58030-246 190 14 246 190 14 246 190 14 241 196 14
58031-190 146 13 18 14 6 2 2 6 2 2 6
58032- 46 46 46 246 246 246 253 253 253 253 253 253
58033-253 253 253 253 253 253 253 253 253 253 253 253
58034-253 253 253 253 253 253 253 253 253 253 253 253
58035-253 253 253 253 253 253 253 253 253 253 253 253
58036-253 253 253 221 221 221 86 86 86 156 107 11
58037-216 158 10 236 178 12 242 186 14 246 186 14
58038-242 186 14 239 182 13 239 182 13 242 186 14
58039-242 186 14 246 186 14 246 190 14 246 190 14
58040-246 190 14 246 190 14 246 190 14 246 190 14
58041-242 186 14 225 175 15 142 122 72 66 66 66
58042- 30 30 30 10 10 10 0 0 0 0 0 0
58043- 0 0 0 0 0 0 0 0 0 0 0 0
58044- 0 0 0 0 0 0 0 0 0 0 0 0
58045- 0 0 0 0 0 0 0 0 0 6 6 6
58046- 26 26 26 70 70 70 163 133 67 210 150 10
58047-236 178 12 246 190 14 246 190 14 246 190 14
58048-246 190 14 246 190 14 246 190 14 246 190 14
58049-246 190 14 246 190 14 246 190 14 246 190 14
58050-246 190 14 246 190 14 246 190 14 246 190 14
58051-232 195 16 121 92 8 34 34 34 106 106 106
58052-221 221 221 253 253 253 253 253 253 253 253 253
58053-253 253 253 253 253 253 253 253 253 253 253 253
58054-253 253 253 253 253 253 253 253 253 253 253 253
58055-253 253 253 253 253 253 253 253 253 253 253 253
58056-242 242 242 82 82 82 18 14 6 163 110 8
58057-216 158 10 236 178 12 242 186 14 246 190 14
58058-246 190 14 246 190 14 246 190 14 246 190 14
58059-246 190 14 246 190 14 246 190 14 246 190 14
58060-246 190 14 246 190 14 246 190 14 246 190 14
58061-246 190 14 246 190 14 242 186 14 163 133 67
58062- 46 46 46 18 18 18 6 6 6 0 0 0
58063- 0 0 0 0 0 0 0 0 0 0 0 0
58064- 0 0 0 0 0 0 0 0 0 0 0 0
58065- 0 0 0 0 0 0 0 0 0 10 10 10
58066- 30 30 30 78 78 78 163 133 67 210 150 10
58067-236 178 12 246 186 14 246 190 14 246 190 14
58068-246 190 14 246 190 14 246 190 14 246 190 14
58069-246 190 14 246 190 14 246 190 14 246 190 14
58070-246 190 14 246 190 14 246 190 14 246 190 14
58071-241 196 14 215 174 15 190 178 144 253 253 253
58072-253 253 253 253 253 253 253 253 253 253 253 253
58073-253 253 253 253 253 253 253 253 253 253 253 253
58074-253 253 253 253 253 253 253 253 253 253 253 253
58075-253 253 253 253 253 253 253 253 253 218 218 218
58076- 58 58 58 2 2 6 22 18 6 167 114 7
58077-216 158 10 236 178 12 246 186 14 246 190 14
58078-246 190 14 246 190 14 246 190 14 246 190 14
58079-246 190 14 246 190 14 246 190 14 246 190 14
58080-246 190 14 246 190 14 246 190 14 246 190 14
58081-246 190 14 246 186 14 242 186 14 190 150 46
58082- 54 54 54 22 22 22 6 6 6 0 0 0
58083- 0 0 0 0 0 0 0 0 0 0 0 0
58084- 0 0 0 0 0 0 0 0 0 0 0 0
58085- 0 0 0 0 0 0 0 0 0 14 14 14
58086- 38 38 38 86 86 86 180 133 36 213 154 11
58087-236 178 12 246 186 14 246 190 14 246 190 14
58088-246 190 14 246 190 14 246 190 14 246 190 14
58089-246 190 14 246 190 14 246 190 14 246 190 14
58090-246 190 14 246 190 14 246 190 14 246 190 14
58091-246 190 14 232 195 16 190 146 13 214 214 214
58092-253 253 253 253 253 253 253 253 253 253 253 253
58093-253 253 253 253 253 253 253 253 253 253 253 253
58094-253 253 253 253 253 253 253 253 253 253 253 253
58095-253 253 253 250 250 250 170 170 170 26 26 26
58096- 2 2 6 2 2 6 37 26 9 163 110 8
58097-219 162 10 239 182 13 246 186 14 246 190 14
58098-246 190 14 246 190 14 246 190 14 246 190 14
58099-246 190 14 246 190 14 246 190 14 246 190 14
58100-246 190 14 246 190 14 246 190 14 246 190 14
58101-246 186 14 236 178 12 224 166 10 142 122 72
58102- 46 46 46 18 18 18 6 6 6 0 0 0
58103- 0 0 0 0 0 0 0 0 0 0 0 0
58104- 0 0 0 0 0 0 0 0 0 0 0 0
58105- 0 0 0 0 0 0 6 6 6 18 18 18
58106- 50 50 50 109 106 95 192 133 9 224 166 10
58107-242 186 14 246 190 14 246 190 14 246 190 14
58108-246 190 14 246 190 14 246 190 14 246 190 14
58109-246 190 14 246 190 14 246 190 14 246 190 14
58110-246 190 14 246 190 14 246 190 14 246 190 14
58111-242 186 14 226 184 13 210 162 10 142 110 46
58112-226 226 226 253 253 253 253 253 253 253 253 253
58113-253 253 253 253 253 253 253 253 253 253 253 253
58114-253 253 253 253 253 253 253 253 253 253 253 253
58115-198 198 198 66 66 66 2 2 6 2 2 6
58116- 2 2 6 2 2 6 50 34 6 156 107 11
58117-219 162 10 239 182 13 246 186 14 246 190 14
58118-246 190 14 246 190 14 246 190 14 246 190 14
58119-246 190 14 246 190 14 246 190 14 246 190 14
58120-246 190 14 246 190 14 246 190 14 242 186 14
58121-234 174 13 213 154 11 154 122 46 66 66 66
58122- 30 30 30 10 10 10 0 0 0 0 0 0
58123- 0 0 0 0 0 0 0 0 0 0 0 0
58124- 0 0 0 0 0 0 0 0 0 0 0 0
58125- 0 0 0 0 0 0 6 6 6 22 22 22
58126- 58 58 58 154 121 60 206 145 10 234 174 13
58127-242 186 14 246 186 14 246 190 14 246 190 14
58128-246 190 14 246 190 14 246 190 14 246 190 14
58129-246 190 14 246 190 14 246 190 14 246 190 14
58130-246 190 14 246 190 14 246 190 14 246 190 14
58131-246 186 14 236 178 12 210 162 10 163 110 8
58132- 61 42 6 138 138 138 218 218 218 250 250 250
58133-253 253 253 253 253 253 253 253 253 250 250 250
58134-242 242 242 210 210 210 144 144 144 66 66 66
58135- 6 6 6 2 2 6 2 2 6 2 2 6
58136- 2 2 6 2 2 6 61 42 6 163 110 8
58137-216 158 10 236 178 12 246 190 14 246 190 14
58138-246 190 14 246 190 14 246 190 14 246 190 14
58139-246 190 14 246 190 14 246 190 14 246 190 14
58140-246 190 14 239 182 13 230 174 11 216 158 10
58141-190 142 34 124 112 88 70 70 70 38 38 38
58142- 18 18 18 6 6 6 0 0 0 0 0 0
58143- 0 0 0 0 0 0 0 0 0 0 0 0
58144- 0 0 0 0 0 0 0 0 0 0 0 0
58145- 0 0 0 0 0 0 6 6 6 22 22 22
58146- 62 62 62 168 124 44 206 145 10 224 166 10
58147-236 178 12 239 182 13 242 186 14 242 186 14
58148-246 186 14 246 190 14 246 190 14 246 190 14
58149-246 190 14 246 190 14 246 190 14 246 190 14
58150-246 190 14 246 190 14 246 190 14 246 190 14
58151-246 190 14 236 178 12 216 158 10 175 118 6
58152- 80 54 7 2 2 6 6 6 6 30 30 30
58153- 54 54 54 62 62 62 50 50 50 38 38 38
58154- 14 14 14 2 2 6 2 2 6 2 2 6
58155- 2 2 6 2 2 6 2 2 6 2 2 6
58156- 2 2 6 6 6 6 80 54 7 167 114 7
58157-213 154 11 236 178 12 246 190 14 246 190 14
58158-246 190 14 246 190 14 246 190 14 246 190 14
58159-246 190 14 242 186 14 239 182 13 239 182 13
58160-230 174 11 210 150 10 174 135 50 124 112 88
58161- 82 82 82 54 54 54 34 34 34 18 18 18
58162- 6 6 6 0 0 0 0 0 0 0 0 0
58163- 0 0 0 0 0 0 0 0 0 0 0 0
58164- 0 0 0 0 0 0 0 0 0 0 0 0
58165- 0 0 0 0 0 0 6 6 6 18 18 18
58166- 50 50 50 158 118 36 192 133 9 200 144 11
58167-216 158 10 219 162 10 224 166 10 226 170 11
58168-230 174 11 236 178 12 239 182 13 239 182 13
58169-242 186 14 246 186 14 246 190 14 246 190 14
58170-246 190 14 246 190 14 246 190 14 246 190 14
58171-246 186 14 230 174 11 210 150 10 163 110 8
58172-104 69 6 10 10 10 2 2 6 2 2 6
58173- 2 2 6 2 2 6 2 2 6 2 2 6
58174- 2 2 6 2 2 6 2 2 6 2 2 6
58175- 2 2 6 2 2 6 2 2 6 2 2 6
58176- 2 2 6 6 6 6 91 60 6 167 114 7
58177-206 145 10 230 174 11 242 186 14 246 190 14
58178-246 190 14 246 190 14 246 186 14 242 186 14
58179-239 182 13 230 174 11 224 166 10 213 154 11
58180-180 133 36 124 112 88 86 86 86 58 58 58
58181- 38 38 38 22 22 22 10 10 10 6 6 6
58182- 0 0 0 0 0 0 0 0 0 0 0 0
58183- 0 0 0 0 0 0 0 0 0 0 0 0
58184- 0 0 0 0 0 0 0 0 0 0 0 0
58185- 0 0 0 0 0 0 0 0 0 14 14 14
58186- 34 34 34 70 70 70 138 110 50 158 118 36
58187-167 114 7 180 123 7 192 133 9 197 138 11
58188-200 144 11 206 145 10 213 154 11 219 162 10
58189-224 166 10 230 174 11 239 182 13 242 186 14
58190-246 186 14 246 186 14 246 186 14 246 186 14
58191-239 182 13 216 158 10 185 133 11 152 99 6
58192-104 69 6 18 14 6 2 2 6 2 2 6
58193- 2 2 6 2 2 6 2 2 6 2 2 6
58194- 2 2 6 2 2 6 2 2 6 2 2 6
58195- 2 2 6 2 2 6 2 2 6 2 2 6
58196- 2 2 6 6 6 6 80 54 7 152 99 6
58197-192 133 9 219 162 10 236 178 12 239 182 13
58198-246 186 14 242 186 14 239 182 13 236 178 12
58199-224 166 10 206 145 10 192 133 9 154 121 60
58200- 94 94 94 62 62 62 42 42 42 22 22 22
58201- 14 14 14 6 6 6 0 0 0 0 0 0
58202- 0 0 0 0 0 0 0 0 0 0 0 0
58203- 0 0 0 0 0 0 0 0 0 0 0 0
58204- 0 0 0 0 0 0 0 0 0 0 0 0
58205- 0 0 0 0 0 0 0 0 0 6 6 6
58206- 18 18 18 34 34 34 58 58 58 78 78 78
58207-101 98 89 124 112 88 142 110 46 156 107 11
58208-163 110 8 167 114 7 175 118 6 180 123 7
58209-185 133 11 197 138 11 210 150 10 219 162 10
58210-226 170 11 236 178 12 236 178 12 234 174 13
58211-219 162 10 197 138 11 163 110 8 130 83 6
58212- 91 60 6 10 10 10 2 2 6 2 2 6
58213- 18 18 18 38 38 38 38 38 38 38 38 38
58214- 38 38 38 38 38 38 38 38 38 38 38 38
58215- 38 38 38 38 38 38 26 26 26 2 2 6
58216- 2 2 6 6 6 6 70 47 6 137 92 6
58217-175 118 6 200 144 11 219 162 10 230 174 11
58218-234 174 13 230 174 11 219 162 10 210 150 10
58219-192 133 9 163 110 8 124 112 88 82 82 82
58220- 50 50 50 30 30 30 14 14 14 6 6 6
58221- 0 0 0 0 0 0 0 0 0 0 0 0
58222- 0 0 0 0 0 0 0 0 0 0 0 0
58223- 0 0 0 0 0 0 0 0 0 0 0 0
58224- 0 0 0 0 0 0 0 0 0 0 0 0
58225- 0 0 0 0 0 0 0 0 0 0 0 0
58226- 6 6 6 14 14 14 22 22 22 34 34 34
58227- 42 42 42 58 58 58 74 74 74 86 86 86
58228-101 98 89 122 102 70 130 98 46 121 87 25
58229-137 92 6 152 99 6 163 110 8 180 123 7
58230-185 133 11 197 138 11 206 145 10 200 144 11
58231-180 123 7 156 107 11 130 83 6 104 69 6
58232- 50 34 6 54 54 54 110 110 110 101 98 89
58233- 86 86 86 82 82 82 78 78 78 78 78 78
58234- 78 78 78 78 78 78 78 78 78 78 78 78
58235- 78 78 78 82 82 82 86 86 86 94 94 94
58236-106 106 106 101 101 101 86 66 34 124 80 6
58237-156 107 11 180 123 7 192 133 9 200 144 11
58238-206 145 10 200 144 11 192 133 9 175 118 6
58239-139 102 15 109 106 95 70 70 70 42 42 42
58240- 22 22 22 10 10 10 0 0 0 0 0 0
58241- 0 0 0 0 0 0 0 0 0 0 0 0
58242- 0 0 0 0 0 0 0 0 0 0 0 0
58243- 0 0 0 0 0 0 0 0 0 0 0 0
58244- 0 0 0 0 0 0 0 0 0 0 0 0
58245- 0 0 0 0 0 0 0 0 0 0 0 0
58246- 0 0 0 0 0 0 6 6 6 10 10 10
58247- 14 14 14 22 22 22 30 30 30 38 38 38
58248- 50 50 50 62 62 62 74 74 74 90 90 90
58249-101 98 89 112 100 78 121 87 25 124 80 6
58250-137 92 6 152 99 6 152 99 6 152 99 6
58251-138 86 6 124 80 6 98 70 6 86 66 30
58252-101 98 89 82 82 82 58 58 58 46 46 46
58253- 38 38 38 34 34 34 34 34 34 34 34 34
58254- 34 34 34 34 34 34 34 34 34 34 34 34
58255- 34 34 34 34 34 34 38 38 38 42 42 42
58256- 54 54 54 82 82 82 94 86 76 91 60 6
58257-134 86 6 156 107 11 167 114 7 175 118 6
58258-175 118 6 167 114 7 152 99 6 121 87 25
58259-101 98 89 62 62 62 34 34 34 18 18 18
58260- 6 6 6 0 0 0 0 0 0 0 0 0
58261- 0 0 0 0 0 0 0 0 0 0 0 0
58262- 0 0 0 0 0 0 0 0 0 0 0 0
58263- 0 0 0 0 0 0 0 0 0 0 0 0
58264- 0 0 0 0 0 0 0 0 0 0 0 0
58265- 0 0 0 0 0 0 0 0 0 0 0 0
58266- 0 0 0 0 0 0 0 0 0 0 0 0
58267- 0 0 0 6 6 6 6 6 6 10 10 10
58268- 18 18 18 22 22 22 30 30 30 42 42 42
58269- 50 50 50 66 66 66 86 86 86 101 98 89
58270-106 86 58 98 70 6 104 69 6 104 69 6
58271-104 69 6 91 60 6 82 62 34 90 90 90
58272- 62 62 62 38 38 38 22 22 22 14 14 14
58273- 10 10 10 10 10 10 10 10 10 10 10 10
58274- 10 10 10 10 10 10 6 6 6 10 10 10
58275- 10 10 10 10 10 10 10 10 10 14 14 14
58276- 22 22 22 42 42 42 70 70 70 89 81 66
58277- 80 54 7 104 69 6 124 80 6 137 92 6
58278-134 86 6 116 81 8 100 82 52 86 86 86
58279- 58 58 58 30 30 30 14 14 14 6 6 6
58280- 0 0 0 0 0 0 0 0 0 0 0 0
58281- 0 0 0 0 0 0 0 0 0 0 0 0
58282- 0 0 0 0 0 0 0 0 0 0 0 0
58283- 0 0 0 0 0 0 0 0 0 0 0 0
58284- 0 0 0 0 0 0 0 0 0 0 0 0
58285- 0 0 0 0 0 0 0 0 0 0 0 0
58286- 0 0 0 0 0 0 0 0 0 0 0 0
58287- 0 0 0 0 0 0 0 0 0 0 0 0
58288- 0 0 0 6 6 6 10 10 10 14 14 14
58289- 18 18 18 26 26 26 38 38 38 54 54 54
58290- 70 70 70 86 86 86 94 86 76 89 81 66
58291- 89 81 66 86 86 86 74 74 74 50 50 50
58292- 30 30 30 14 14 14 6 6 6 0 0 0
58293- 0 0 0 0 0 0 0 0 0 0 0 0
58294- 0 0 0 0 0 0 0 0 0 0 0 0
58295- 0 0 0 0 0 0 0 0 0 0 0 0
58296- 6 6 6 18 18 18 34 34 34 58 58 58
58297- 82 82 82 89 81 66 89 81 66 89 81 66
58298- 94 86 66 94 86 76 74 74 74 50 50 50
58299- 26 26 26 14 14 14 6 6 6 0 0 0
58300- 0 0 0 0 0 0 0 0 0 0 0 0
58301- 0 0 0 0 0 0 0 0 0 0 0 0
58302- 0 0 0 0 0 0 0 0 0 0 0 0
58303- 0 0 0 0 0 0 0 0 0 0 0 0
58304- 0 0 0 0 0 0 0 0 0 0 0 0
58305- 0 0 0 0 0 0 0 0 0 0 0 0
58306- 0 0 0 0 0 0 0 0 0 0 0 0
58307- 0 0 0 0 0 0 0 0 0 0 0 0
58308- 0 0 0 0 0 0 0 0 0 0 0 0
58309- 6 6 6 6 6 6 14 14 14 18 18 18
58310- 30 30 30 38 38 38 46 46 46 54 54 54
58311- 50 50 50 42 42 42 30 30 30 18 18 18
58312- 10 10 10 0 0 0 0 0 0 0 0 0
58313- 0 0 0 0 0 0 0 0 0 0 0 0
58314- 0 0 0 0 0 0 0 0 0 0 0 0
58315- 0 0 0 0 0 0 0 0 0 0 0 0
58316- 0 0 0 6 6 6 14 14 14 26 26 26
58317- 38 38 38 50 50 50 58 58 58 58 58 58
58318- 54 54 54 42 42 42 30 30 30 18 18 18
58319- 10 10 10 0 0 0 0 0 0 0 0 0
58320- 0 0 0 0 0 0 0 0 0 0 0 0
58321- 0 0 0 0 0 0 0 0 0 0 0 0
58322- 0 0 0 0 0 0 0 0 0 0 0 0
58323- 0 0 0 0 0 0 0 0 0 0 0 0
58324- 0 0 0 0 0 0 0 0 0 0 0 0
58325- 0 0 0 0 0 0 0 0 0 0 0 0
58326- 0 0 0 0 0 0 0 0 0 0 0 0
58327- 0 0 0 0 0 0 0 0 0 0 0 0
58328- 0 0 0 0 0 0 0 0 0 0 0 0
58329- 0 0 0 0 0 0 0 0 0 6 6 6
58330- 6 6 6 10 10 10 14 14 14 18 18 18
58331- 18 18 18 14 14 14 10 10 10 6 6 6
58332- 0 0 0 0 0 0 0 0 0 0 0 0
58333- 0 0 0 0 0 0 0 0 0 0 0 0
58334- 0 0 0 0 0 0 0 0 0 0 0 0
58335- 0 0 0 0 0 0 0 0 0 0 0 0
58336- 0 0 0 0 0 0 0 0 0 6 6 6
58337- 14 14 14 18 18 18 22 22 22 22 22 22
58338- 18 18 18 14 14 14 10 10 10 6 6 6
58339- 0 0 0 0 0 0 0 0 0 0 0 0
58340- 0 0 0 0 0 0 0 0 0 0 0 0
58341- 0 0 0 0 0 0 0 0 0 0 0 0
58342- 0 0 0 0 0 0 0 0 0 0 0 0
58343- 0 0 0 0 0 0 0 0 0 0 0 0
58344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58357+4 4 4 4 4 4
58358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58371+4 4 4 4 4 4
58372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58385+4 4 4 4 4 4
58386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58399+4 4 4 4 4 4
58400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58413+4 4 4 4 4 4
58414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58427+4 4 4 4 4 4
58428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58432+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58433+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58437+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58438+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58439+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58441+4 4 4 4 4 4
58442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58446+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58447+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58448+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58451+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58452+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58453+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58454+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58455+4 4 4 4 4 4
58456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58460+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58461+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58462+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58465+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58466+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58467+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58468+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58469+4 4 4 4 4 4
58470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58473+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58474+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58475+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58476+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58478+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58479+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58480+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58481+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58482+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58483+4 4 4 4 4 4
58484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58487+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58488+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58489+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58490+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58491+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58492+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58493+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58494+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58495+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58496+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58497+4 4 4 4 4 4
58498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58501+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58502+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58503+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58504+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58505+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58506+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58507+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58508+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58509+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58510+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58511+4 4 4 4 4 4
58512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58514+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58515+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58516+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58517+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58518+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58519+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58520+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58521+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58522+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58523+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58524+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58525+4 4 4 4 4 4
58526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58528+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58529+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58530+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58531+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58532+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58533+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58534+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58535+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58536+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58537+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58538+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58539+4 4 4 4 4 4
58540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58542+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58543+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58544+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58545+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58546+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58547+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58548+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58549+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58550+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58551+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58552+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58553+4 4 4 4 4 4
58554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58556+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58557+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58558+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58559+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58560+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58561+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58562+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58563+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58564+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58565+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58566+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58567+4 4 4 4 4 4
58568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58569+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58570+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58571+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58572+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58573+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58574+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58575+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58576+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58577+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58578+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58579+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58580+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58581+4 4 4 4 4 4
58582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58583+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58584+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58585+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
58586+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58587+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
58588+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
58589+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
58590+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
58591+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
58592+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
58593+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
58594+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
58595+0 0 0 4 4 4
58596+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58597+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
58598+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
58599+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
58600+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
58601+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
58602+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
58603+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
58604+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
58605+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
58606+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
58607+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
58608+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
58609+2 0 0 0 0 0
58610+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
58611+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
58612+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
58613+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
58614+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
58615+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
58616+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
58617+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
58618+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
58619+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
58620+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
58621+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
58622+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
58623+37 38 37 0 0 0
58624+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58625+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
58626+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
58627+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
58628+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
58629+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
58630+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
58631+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
58632+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
58633+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
58634+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
58635+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
58636+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
58637+85 115 134 4 0 0
58638+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
58639+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
58640+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
58641+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
58642+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
58643+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
58644+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
58645+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
58646+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
58647+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
58648+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
58649+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
58650+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
58651+60 73 81 4 0 0
58652+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
58653+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
58654+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
58655+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
58656+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
58657+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
58658+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
58659+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
58660+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
58661+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
58662+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
58663+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
58664+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
58665+16 19 21 4 0 0
58666+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
58667+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
58668+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
58669+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
58670+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
58671+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
58672+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
58673+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
58674+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
58675+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
58676+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
58677+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
58678+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
58679+4 0 0 4 3 3
58680+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
58681+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
58682+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
58683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
58684+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
58685+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
58686+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
58687+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
58688+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
58689+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
58690+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
58691+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
58692+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
58693+3 2 2 4 4 4
58694+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
58695+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
58696+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
58697+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58698+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
58699+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
58700+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
58701+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
58702+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
58703+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58704+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58705+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58706+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58707+4 4 4 4 4 4
58708+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58709+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58710+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58711+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58712+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58713+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58714+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58715+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58716+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58717+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58718+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58719+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58720+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58721+4 4 4 4 4 4
58722+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58723+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58724+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58725+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58726+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58727+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58728+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58729+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58730+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58731+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58732+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58733+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58734+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58735+5 5 5 5 5 5
58736+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58737+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58738+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58739+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58740+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58741+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58742+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58743+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58744+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58745+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58746+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58747+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58748+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58749+5 5 5 4 4 4
58750+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58751+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58752+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58753+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58754+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58755+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58756+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
58757+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
58758+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
58759+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
58760+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
58761+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58763+4 4 4 4 4 4
58764+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
58765+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
58766+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
58767+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
58768+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
58769+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58770+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58771+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
58772+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
58773+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
58774+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
58775+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
58776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58777+4 4 4 4 4 4
58778+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
58779+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
58780+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
58781+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
58782+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58783+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
58784+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
58785+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
58786+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
58787+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
58788+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
58789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58791+4 4 4 4 4 4
58792+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
58793+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
58794+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
58795+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
58796+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58797+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58798+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58799+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
58800+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
58801+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
58802+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
58803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58805+4 4 4 4 4 4
58806+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
58807+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
58808+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
58809+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
58810+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58811+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
58812+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58813+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
58814+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
58815+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
58816+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58819+4 4 4 4 4 4
58820+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
58821+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
58822+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
58823+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
58824+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58825+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
58826+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
58827+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
58828+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
58829+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
58830+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
58831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58833+4 4 4 4 4 4
58834+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
58835+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
58836+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
58837+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
58838+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58839+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
58840+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
58841+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
58842+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
58843+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
58844+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
58845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58847+4 4 4 4 4 4
58848+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
58849+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
58850+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
58851+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58852+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
58853+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
58854+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
58855+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
58856+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
58857+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
58858+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58861+4 4 4 4 4 4
58862+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
58863+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
58864+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
58865+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58866+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58867+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
58868+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
58869+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
58870+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
58871+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
58872+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58875+4 4 4 4 4 4
58876+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
58877+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
58878+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58879+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58880+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58881+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
58882+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
58883+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
58884+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
58885+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
58886+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58889+4 4 4 4 4 4
58890+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
58891+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
58892+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58893+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58894+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58895+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
58896+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
58897+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
58898+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58899+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58900+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58903+4 4 4 4 4 4
58904+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58905+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
58906+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58907+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
58908+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
58909+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
58910+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
58911+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
58912+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58913+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58914+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58917+4 4 4 4 4 4
58918+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58919+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
58920+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58921+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
58922+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58923+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
58924+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
58925+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
58926+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58927+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58928+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58931+4 4 4 4 4 4
58932+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
58933+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
58934+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58935+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
58936+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
58937+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
58938+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
58939+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
58940+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58941+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58942+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58945+4 4 4 4 4 4
58946+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
58947+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
58948+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58949+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
58950+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
58951+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
58952+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
58953+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
58954+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58955+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58956+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58959+4 4 4 4 4 4
58960+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58961+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
58962+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58963+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
58964+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
58965+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
58966+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
58967+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
58968+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58969+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58970+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58973+4 4 4 4 4 4
58974+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
58975+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
58976+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58977+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
58978+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
58979+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
58980+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
58981+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
58982+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
58983+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58984+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58987+4 4 4 4 4 4
58988+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58989+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
58990+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
58991+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
58992+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
58993+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
58994+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
58995+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
58996+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58997+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58998+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59001+4 4 4 4 4 4
59002+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
59003+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
59004+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
59005+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
59006+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
59007+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
59008+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
59009+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
59010+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
59011+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59012+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59015+4 4 4 4 4 4
59016+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
59017+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
59018+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
59019+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
59020+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
59021+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
59022+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
59023+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
59024+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
59025+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59026+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59029+4 4 4 4 4 4
59030+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
59031+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
59032+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
59033+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
59034+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
59035+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
59036+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
59037+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
59038+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
59039+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59040+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59043+4 4 4 4 4 4
59044+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
59045+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
59046+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
59047+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
59048+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
59049+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
59050+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
59051+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
59052+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
59053+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59054+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59057+4 4 4 4 4 4
59058+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
59059+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
59060+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
59061+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
59062+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
59063+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
59064+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
59065+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
59066+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
59067+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59068+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59071+4 4 4 4 4 4
59072+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
59073+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
59074+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
59075+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
59076+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
59077+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
59078+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
59079+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
59080+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
59081+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59082+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59085+4 4 4 4 4 4
59086+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
59087+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
59088+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
59089+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
59090+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
59091+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
59092+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
59093+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
59094+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
59095+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59096+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59099+4 4 4 4 4 4
59100+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
59101+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
59102+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
59103+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
59104+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
59105+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
59106+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
59107+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
59108+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
59109+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
59110+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59113+4 4 4 4 4 4
59114+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
59115+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
59116+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
59117+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
59118+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
59119+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
59120+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
59121+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
59122+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
59123+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
59124+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59127+4 4 4 4 4 4
59128+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
59129+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
59130+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
59131+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
59132+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
59133+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
59134+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59135+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
59136+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
59137+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
59138+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59141+4 4 4 4 4 4
59142+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
59143+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
59144+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
59145+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
59146+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
59147+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
59148+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
59149+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
59150+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
59151+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
59152+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59155+4 4 4 4 4 4
59156+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
59157+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
59158+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
59159+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
59160+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
59161+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
59162+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
59163+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
59164+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
59165+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
59166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59169+4 4 4 4 4 4
59170+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
59171+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
59172+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
59173+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
59174+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
59175+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
59176+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
59177+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
59178+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
59179+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
59180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59183+4 4 4 4 4 4
59184+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
59185+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
59186+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
59187+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
59188+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
59189+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
59190+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
59191+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
59192+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
59193+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
59194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59197+4 4 4 4 4 4
59198+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
59199+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
59200+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
59201+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
59202+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
59203+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
59204+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
59205+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
59206+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
59207+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59211+4 4 4 4 4 4
59212+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
59213+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
59214+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
59215+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
59216+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
59217+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
59218+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
59219+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
59220+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
59221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59225+4 4 4 4 4 4
59226+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
59227+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
59228+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
59229+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
59230+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
59231+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
59232+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
59233+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
59234+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
59235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59239+4 4 4 4 4 4
59240+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
59241+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
59242+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
59243+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
59244+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
59245+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
59246+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
59247+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
59248+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59253+4 4 4 4 4 4
59254+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
59255+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
59256+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59257+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
59258+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
59259+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
59260+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
59261+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
59262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59267+4 4 4 4 4 4
59268+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59269+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
59270+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
59271+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
59272+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
59273+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
59274+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
59275+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
59276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59281+4 4 4 4 4 4
59282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59283+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
59284+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59285+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
59286+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
59287+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
59288+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
59289+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
59290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59295+4 4 4 4 4 4
59296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59297+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
59298+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
59299+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
59300+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
59301+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
59302+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
59303+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
59304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59309+4 4 4 4 4 4
59310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59311+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
59312+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
59313+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59314+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
59315+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
59316+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
59317+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59323+4 4 4 4 4 4
59324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59326+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59327+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
59328+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
59329+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
59330+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
59331+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59337+4 4 4 4 4 4
59338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59341+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59342+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
59343+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
59344+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
59345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59351+4 4 4 4 4 4
59352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59355+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59356+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59357+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
59358+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
59359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59365+4 4 4 4 4 4
59366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59369+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
59370+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
59371+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59372+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
59373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59379+4 4 4 4 4 4
59380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59383+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
59384+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
59385+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
59386+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
59387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59393+4 4 4 4 4 4
59394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59398+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
59399+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59400+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59407+4 4 4 4 4 4
59408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59412+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
59413+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
59414+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
59415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59421+4 4 4 4 4 4
59422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59426+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
59427+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
59428+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59435+4 4 4 4 4 4
59436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59440+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
59441+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
59442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59449+4 4 4 4 4 4
59450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59454+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59455+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
59456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59463+4 4 4 4 4 4
59464diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
59465index 2b8553b..e1a482b 100644
59466--- a/drivers/xen/events/events_base.c
59467+++ b/drivers/xen/events/events_base.c
59468@@ -1564,7 +1564,7 @@ void xen_irq_resume(void)
59469 restore_pirqs();
59470 }
59471
59472-static struct irq_chip xen_dynamic_chip __read_mostly = {
59473+static struct irq_chip xen_dynamic_chip = {
59474 .name = "xen-dyn",
59475
59476 .irq_disable = disable_dynirq,
59477@@ -1578,7 +1578,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
59478 .irq_retrigger = retrigger_dynirq,
59479 };
59480
59481-static struct irq_chip xen_pirq_chip __read_mostly = {
59482+static struct irq_chip xen_pirq_chip = {
59483 .name = "xen-pirq",
59484
59485 .irq_startup = startup_pirq,
59486@@ -1598,7 +1598,7 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
59487 .irq_retrigger = retrigger_dynirq,
59488 };
59489
59490-static struct irq_chip xen_percpu_chip __read_mostly = {
59491+static struct irq_chip xen_percpu_chip = {
59492 .name = "xen-percpu",
59493
59494 .irq_disable = disable_dynirq,
59495diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59496index fef20db..d28b1ab 100644
59497--- a/drivers/xen/xenfs/xenstored.c
59498+++ b/drivers/xen/xenfs/xenstored.c
59499@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59500 static int xsd_kva_open(struct inode *inode, struct file *file)
59501 {
59502 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59503+#ifdef CONFIG_GRKERNSEC_HIDESYM
59504+ NULL);
59505+#else
59506 xen_store_interface);
59507+#endif
59508+
59509 if (!file->private_data)
59510 return -ENOMEM;
59511 return 0;
59512diff --git a/firmware/Makefile b/firmware/Makefile
59513index e297e1b..6900c31 100644
59514--- a/firmware/Makefile
59515+++ b/firmware/Makefile
59516@@ -35,6 +35,7 @@ fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.2.9.0.fw \
59517 bnx2x/bnx2x-e1h-6.2.9.0.fw \
59518 bnx2x/bnx2x-e2-6.2.9.0.fw
59519 fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1a.fw \
59520+ bnx2/bnx2-mips-09-6.2.1b.fw \
59521 bnx2/bnx2-rv2p-09-6.0.17.fw \
59522 bnx2/bnx2-rv2p-09ax-6.0.17.fw \
59523 bnx2/bnx2-mips-06-6.2.1.fw \
59524diff --git a/firmware/WHENCE b/firmware/WHENCE
59525index 0c4d96d..7563083 100644
59526--- a/firmware/WHENCE
59527+++ b/firmware/WHENCE
59528@@ -655,19 +655,20 @@ Driver: BNX2 - Broadcom NetXtremeII
59529 File: bnx2/bnx2-mips-06-6.2.1.fw
59530 File: bnx2/bnx2-rv2p-06-6.0.15.fw
59531 File: bnx2/bnx2-mips-09-6.2.1a.fw
59532+File: bnx2/bnx2-mips-09-6.2.1b.fw
59533 File: bnx2/bnx2-rv2p-09-6.0.17.fw
59534 File: bnx2/bnx2-rv2p-09ax-6.0.17.fw
59535
59536 Licence:
59537-
59538- This file contains firmware data derived from proprietary unpublished
59539- source code, Copyright (c) 2004 - 2010 Broadcom Corporation.
59540-
59541- Permission is hereby granted for the distribution of this firmware data
59542- in hexadecimal or equivalent format, provided this copyright notice is
59543- accompanying it.
59544-
59545-Found in hex form in kernel source.
59546+
59547+ This file contains firmware data derived from proprietary unpublished
59548+ source code, Copyright (c) 2004 - 2010 Broadcom Corporation.
59549+
59550+ Permission is hereby granted for the distribution of this firmware data
59551+ in hexadecimal or equivalent format, provided this copyright notice is
59552+ accompanying it.
59553+
59554+Found in hex form in kernel source.
59555
59556 --------------------------------------------------------------------------
59557
59558diff --git a/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex b/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex
59559new file mode 100644
59560index 0000000..43d7c4f
59561--- /dev/null
59562+++ b/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex
59563@@ -0,0 +1,6496 @@
59564+:10000000080001180800000000005594000000C816
59565+:1000100000000000000000000000000008005594EF
59566+:10002000000000380000565C080000A00800000036
59567+:100030000000574400005694080059200000008436
59568+:100040000000ADD808005744000001C00000AE5CBD
59569+:100050000800321008000000000092580000B01C98
59570+:10006000000000000000000000000000080092589E
59571+:100070000000033C000142740800049008000400E2
59572+:10008000000012FC000145B000000000000000006C
59573+:1000900000000000080016FC00000004000158AC3D
59574+:1000A000080000A80800000000003D00000158B052
59575+:1000B00000000000000000000000000008003D00FB
59576+:1000C00000000030000195B00A000046000000006A
59577+:1000D000000000000000000D636F6D362E322E31DF
59578+:1000E00062000000060201020000000000000003A0
59579+:1000F000000000C800000032000000030000000003
59580+:1001000000000000000000000000000000000000EF
59581+:1001100000000010000001360000EA600000000549
59582+:1001200000000000000000000000000000000008C7
59583+:1001300000000000000000000000000000000000BF
59584+:1001400000000000000000000000000000000000AF
59585+:10015000000000000000000000000000000000009F
59586+:10016000000000020000000000000000000000008D
59587+:10017000000000000000000000000000000000007F
59588+:10018000000000000000000000000010000000005F
59589+:10019000000000000000000000000000000000005F
59590+:1001A000000000000000000000000000000000004F
59591+:1001B000000000000000000000000000000000003F
59592+:1001C000000000000000000000000000000000002F
59593+:1001D000000000000000000000000000000000001F
59594+:1001E0000000000010000003000000000000000DEF
59595+:1001F0000000000D3C020800244256083C030800A1
59596+:1002000024635754AC4000000043202B1480FFFDB2
59597+:10021000244200043C1D080037BD9FFC03A0F021D0
59598+:100220003C100800261001183C1C0800279C5608AA
59599+:100230000E000256000000000000000D27BDFFB4B4
59600+:10024000AFA10000AFA20004AFA30008AFA4000C50
59601+:10025000AFA50010AFA60014AFA70018AFA8001CF0
59602+:10026000AFA90020AFAA0024AFAB0028AFAC002C90
59603+:10027000AFAD0030AFAE0034AFAF0038AFB8003C28
59604+:10028000AFB90040AFBC0044AFBF00480E001544FA
59605+:10029000000000008FBF00488FBC00448FB90040B1
59606+:1002A0008FB8003C8FAF00388FAE00348FAD003078
59607+:1002B0008FAC002C8FAB00288FAA00248FA90020C0
59608+:1002C0008FA8001C8FA700188FA600148FA5001000
59609+:1002D0008FA4000C8FA300088FA200048FA1000040
59610+:1002E00027BD004C3C1B60108F7A5030377B502864
59611+:1002F00003400008AF7A00008F82002427BDFFE092
59612+:10030000AFB00010AFBF0018AFB100148C42000CAA
59613+:100310003C1080008E110100104000348FBF001887
59614+:100320000E000D84000000008F85002024047FFF54
59615+:100330000091202BACB100008E030104960201084D
59616+:1003400000031C003042FFFF00621825ACA300042C
59617+:100350009202010A96030114304200FF3063FFFF4E
59618+:100360000002140000431025ACA200089603010C03
59619+:100370009602010E00031C003042FFFF00621825A8
59620+:10038000ACA3000C960301109602011200031C009E
59621+:100390003042FFFF00621825ACA300108E02011846
59622+:1003A000ACA200148E02011CACA20018148000083C
59623+:1003B0008F820024978200003C0420050044182509
59624+:1003C00024420001ACA3001C0A0000C6A782000062
59625+:1003D0003C0340189442001E00431025ACA2001CB0
59626+:1003E0000E000DB8240400018FBF00188FB1001457
59627+:1003F0008FB000100000102103E0000827BD00208E
59628+:100400003C0780008CE202B834E50100044100089A
59629+:10041000240300013C0208008C42006024420001D9
59630+:100420003C010800AC22006003E0000800601021DD
59631+:100430003C0208008C42005C8CA4002094A30016AF
59632+:100440008CA6000494A5000E24420001ACE40280B6
59633+:100450002463FFFC3C010800AC22005C3C0210005D
59634+:10046000A4E30284A4E5028600001821ACE6028819
59635+:10047000ACE202B803E000080060102127BDFFE0F5
59636+:100480003C028000AFB0001034420100AFBF001C3E
59637+:10049000AFB20018AFB100148C43000094450008BF
59638+:1004A0002462FE002C42038110400003000381C23D
59639+:1004B0000A00010226100004240201001462000553
59640+:1004C0003C1180003C02800890420004305000FF44
59641+:1004D0003C11800036320100964300143202000FB6
59642+:1004E00000021500004310253C0308008C63004403
59643+:1004F00030A40004AE220080246300013C01080007
59644+:10050000AC2300441080000730A200028FBF001C03
59645+:100510008FB200188FB100148FB000100A0000CE07
59646+:1005200027BD00201040002D0000182130A20080BF
59647+:1005300010400005362200708E44001C0E000C672F
59648+:10054000240500A0362200708C4400008F82000C2D
59649+:10055000008210232C43012C10600004AF82001095
59650+:10056000240300010A000145AF84000C8E42000400
59651+:100570003C036020AF84000CAC6200143C02080015
59652+:100580008C42005850400015000018218C62000475
59653+:10059000240301FE304203FF144300100000182121
59654+:1005A0002E020004104000032E0200080A00014041
59655+:1005B0000000802114400003000000000A000140F8
59656+:1005C0002610FFF90000000D2402000202021004B0
59657+:1005D0003C036000AC626914000018218FBF001C4E
59658+:1005E0008FB200188FB100148FB00010006010217E
59659+:1005F00003E0000827BD00203C0480008C8301003C
59660+:1006000024020100506200033C0280080000000D3B
59661+:100610003C02800890430004000010213063000F6A
59662+:1006200000031D0003E00008AC8300800004188074
59663+:100630002782FF9C00621821000410C00044102390
59664+:100640008C640000000210C03C030800246356E4E0
59665+:10065000004310213C038000AC64009003E00008DC
59666+:10066000AF8200243C0208008C42011410400019A3
59667+:100670003084400030A2007F000231C03C02020002
59668+:100680001080001400A218253C026020AC43001426
59669+:100690003C0408008C8456B83C0308008C630110AD
59670+:1006A0003C02800024050900AC4500200086202182
59671+:1006B000246300013C028008AC4400643C01080053
59672+:1006C000AC2301103C010800AC2456B803E000083C
59673+:1006D000000000003C02602003E00008AC4500146C
59674+:1006E00003E000080000102103E0000800001021D2
59675+:1006F00030A2000810400008240201003C0208005B
59676+:100700008C42010C244200013C010800AC22010C87
59677+:1007100003E0000800000000148200080000000050
59678+:100720003C0208008C4200FC244200013C0108000D
59679+:10073000AC2200FC0A0001A330A200203C02080009
59680+:100740008C420084244200013C010800AC22008459
59681+:1007500030A200201040000830A200103C02080027
59682+:100760008C420108244200013C010800AC2201082F
59683+:1007700003E0000800000000104000080000000036
59684+:100780003C0208008C420104244200013C010800A4
59685+:10079000AC22010403E00008000000003C02080055
59686+:1007A0008C420100244200013C010800AC220100FF
59687+:1007B00003E000080000000027BDFFE0AFB1001417
59688+:1007C0003C118000AFB20018AFBF001CAFB00010EA
59689+:1007D0003632010096500008320200041040000733
59690+:1007E000320300028FBF001C8FB200188FB10014BB
59691+:1007F0008FB000100A0000CE27BD00201060000B53
59692+:10080000020028218E2401000E00018A0000000051
59693+:100810003202008010400003240500A10E000C6786
59694+:100820008E44001C0A0001E3240200018E2301040F
59695+:100830008F82000810430006020028218E24010048
59696+:100840000E00018A000000008E220104AF82000821
59697+:10085000000010218FBF001C8FB200188FB1001450
59698+:100860008FB0001003E0000827BD00202C82000498
59699+:1008700014400002000018212483FFFD240200021E
59700+:10088000006210043C03600003E00008AC626914DD
59701+:1008900027BDFFE0AFBF001CAFB20018AFB100141E
59702+:1008A000AFB000103C048000948201083043700017
59703+:1008B000240220001062000A2862200154400052E5
59704+:1008C0008FBF001C24024000106200482402600018
59705+:1008D0001062004A8FBF001C0A0002518FB200183C
59706+:1008E00034820100904300098C5000189451000C90
59707+:1008F000240200091062001C0000902128620009F7
59708+:10090000144000218F8200242402000A5062001249
59709+:10091000323100FF2402000B1062000F00000000C3
59710+:100920002402000C146200188F8200243C0208008C
59711+:100930008C4256B824030900AC83002000501021DB
59712+:100940003C038008AC6200643C010800AC2256B84D
59713+:100950000A0002508FBF001C0E0001E900102602A1
59714+:100960000A0002308F8200240E0001E900102602E6
59715+:100970003C0380089462001A8C72000C3042FFFF26
59716+:10098000020280258F8200248C42000C5040001E01
59717+:100990008FBF001C0E000D84000000003C02800090
59718+:1009A00034420100944300088F82002400031C009D
59719+:1009B0009444001E8F82002000641825AC50000073
59720+:1009C00024040001AC510004AC520008AC40000CFF
59721+:1009D000AC400010AC400014AC4000180E000DB844
59722+:1009E000AC43001C0A0002508FBF001C0E000440E4
59723+:1009F000000000000A0002508FBF001C0E000C9F78
59724+:100A0000000000008FBF001C8FB200188FB10014CF
59725+:100A10008FB000100000102103E0000827BD002067
59726+:100A200027BDFFD8AFB400203C036010AFBF002447
59727+:100A3000AFB3001CAFB20018AFB10014AFB00010DC
59728+:100A40008C6450002402FF7F3C1408002694563822
59729+:100A5000008220243484380CAC6450003C028000B6
59730+:100A6000240300370E0014B0AC4300083C07080014
59731+:100A700024E70618028010212404001D2484FFFFAF
59732+:100A8000AC4700000481FFFD244200043C02080042
59733+:100A9000244207C83C010800AC2256403C02080032
59734+:100AA000244202303C030800246306203C04080072
59735+:100AB000248403B43C05080024A506F03C06080085
59736+:100AC00024C62C9C3C010800AC2256803C02080045
59737+:100AD000244205303C010800AC2756843C01080044
59738+:100AE000AC2656943C010800AC23569C3C010800FF
59739+:100AF000AC2456A03C010800AC2556A43C010800DB
59740+:100B0000AC2256A83C010800AC23563C3C0108002E
59741+:100B1000AC2456443C010800AC2056603C0108005F
59742+:100B2000AC2556643C010800AC2056703C0108001E
59743+:100B3000AC27567C3C010800AC2656903C010800CE
59744+:100B4000AC2356980E00056E00000000AF80000C2C
59745+:100B50003C0280008C5300008F8300043C0208009C
59746+:100B60008C420020106200213262000700008821C0
59747+:100B70002792FF9C3C100800261056E43C02080017
59748+:100B80008C42002024050001022518040043202483
59749+:100B90008F820004004310245044000C26310001D1
59750+:100BA00010800008AF9000248E4300003C028000BB
59751+:100BB000AC4300900E000D4BAE05000C0A0002C1C4
59752+:100BC00026310001AE00000C263100012E22000269
59753+:100BD000261000381440FFE9265200043C020800A9
59754+:100BE0008C420020AF820004326200071040FFD91F
59755+:100BF0003C028000326200011040002D326200028F
59756+:100C00003C0580008CA2010000002021ACA2002045
59757+:100C10008CA301042C42078110400008ACA300A85B
59758+:100C200094A2010824032000304270001443000302
59759+:100C30003C02800890420005304400FF0E0001593C
59760+:100C4000000000003C0280009042010B304300FF96
59761+:100C50002C62001E54400004000310800E00018628
59762+:100C60000A0002EC00000000005410218C42000039
59763+:100C70000040F80900000000104000043C02800021
59764+:100C80008C4301043C026020AC4300143C02080089
59765+:100C90008C4200343C0440003C03800024420001AC
59766+:100CA000AC6401383C010800AC220034326200021E
59767+:100CB00010400010326200043C1080008E0201409F
59768+:100CC000000020210E000159AE0200200E00038317
59769+:100CD000000000003C024000AE0201783C02080027
59770+:100CE0008C420038244200013C010800AC2200384C
59771+:100CF000326200041040FF973C0280003C108000EC
59772+:100D00008E020180000020210E000159AE02002059
59773+:100D10008E03018024020F00546200073C02800809
59774+:100D20008E0201883C0300E03042FFFF00431025A3
59775+:100D30000A000328AE020080344200809042000086
59776+:100D400024030050304200FF14430007000000005D
59777+:100D50000E000362000000001440000300000000C9
59778+:100D60000E000971000000003C0208008C42003CAB
59779+:100D70003C0440003C03800024420001AC6401B804
59780+:100D80003C010800AC22003C0A0002A33C028000A7
59781+:100D90003C02900034420001008220253C02800089
59782+:100DA000AC4400203C0380008C6200200440FFFE25
59783+:100DB0000000000003E00008000000003C0280008A
59784+:100DC000344300010083202503E00008AC440020E8
59785+:100DD00027BDFFE0AFB10014AFB000100080882144
59786+:100DE000AFBF00180E00033230B000FF8F83FF94B6
59787+:100DF000022020219062002502028025A07000259B
59788+:100E00008C7000183C0280000E00033D020280241A
59789+:100E10001600000B8FBF00183C0480008C8201F884
59790+:100E20000440FFFE348201C024030002AC510000E4
59791+:100E3000A04300043C021000AC8201F88FBF0018F0
59792+:100E40008FB100148FB0001003E0000827BD002010
59793+:100E500027BDFFE83C028000AFBF00103442018094
59794+:100E6000944300048C4400083063020010600005C5
59795+:100E7000000028210E00100C000000000A0003787A
59796+:100E8000240500013C02FF000480000700821824B2
59797+:100E90003C02040014620004240500018F82FF94C8
59798+:100EA00090420008240500018FBF001000A010210F
59799+:100EB00003E0000827BD00188F82FF982405000179
59800+:100EC000A040001A3C028000344201400A00034264
59801+:100ED0008C4400008F85FF9427BDFFE0AFBF001C4E
59802+:100EE000AFB20018AFB10014AFB0001090A2000074
59803+:100EF000304400FF38830020388200300003182B74
59804+:100F00000002102B0062182410600003240200501D
59805+:100F1000148200A88FBF001C90A20005304200017F
59806+:100F2000104000A48FBF001C3C02800034420140EE
59807+:100F3000904200082443FFFF2C6200051040009EF1
59808+:100F40008FB20018000310803C030800246355ACE6
59809+:100F5000004310218C420000004000080000000007
59810+:100F60003C028000345101400E0003328E24000008
59811+:100F70008F92FF948E2200048E50000C1602000205
59812+:100F800024020001AE42000C0E00033D8E2400003E
59813+:100F90008E220004145000068FBF001C8FB2001870
59814+:100FA0008FB100148FB000100A000F7827BD002009
59815+:100FB0008E42000C0A000419000000003C0480006E
59816+:100FC0003482014094A300108C4200043063FFFF80
59817+:100FD0001443001C0000000024020001A4A2001021
59818+:100FE0008C8202380441000F3C0380003C02003F29
59819+:100FF0003448F0003C0760003C06FFC08CE22BBC8C
59820+:1010000000461824004810240002130200031D8229
59821+:10101000106200583C0280008C8202380440FFF7C6
59822+:101020003C038000346201408C44000034620200C2
59823+:10103000AC4400003C021000AC6202380A00043BE1
59824+:101040008FBF001C94A200100A00041900000000C9
59825+:10105000240200201482000F3C0280003C03800028
59826+:1010600094A20012346301408C6300043042FFFFFD
59827+:10107000146200050000000024020001A4A2001276
59828+:101080000A0004028FBF001C94A200120A00041977
59829+:1010900000000000345101400E0003328E24000095
59830+:1010A0008F92FF948E230004964200123050FFFF6F
59831+:1010B0001603000224020001A64200120E00033DA6
59832+:1010C0008E2400008E220004160200068FBF001C32
59833+:1010D0008FB200188FB100148FB000100A00037C8B
59834+:1010E00027BD0020964200120A00041900000000EB
59835+:1010F0003C03800094A20014346301408C6300041C
59836+:101100003042FFFF14620008240200018FBF001C60
59837+:101110008FB200188FB100148FB00010A4A2001479
59838+:101120000A00146327BD002094A20014144000217B
59839+:101130008FBF001C0A000435000000003C03800043
59840+:1011400094A20016346301408C6300043042FFFF18
59841+:101150001462000D240200018FBF001C8FB2001822
59842+:101160008FB100148FB00010A4A200160A000B1457
59843+:1011700027BD00209442007824420004A4A200105D
59844+:101180000A00043B8FBF001C94A200162403000138
59845+:101190003042FFFF144300078FBF001C3C020800D1
59846+:1011A0008C420070244200013C010800AC22007017
59847+:1011B0008FBF001C8FB200188FB100148FB00010C9
59848+:1011C00003E0000827BD002027BDFFD8AFB20018FC
59849+:1011D0008F92FF94AFB10014AFBF0020AFB3001CDB
59850+:1011E000AFB000103C028000345101008C5001006F
59851+:1011F0009242000092230009304400FF2402001FA5
59852+:10120000106200AB28620020104000192402003850
59853+:101210002862000A1040000D2402000B286200081A
59854+:101220001040002E8F820024046001042862000216
59855+:101230001440002A8F820024240200061062002637
59856+:101240008FBF00200A00055F8FB3001C1062006092
59857+:101250002862000B144000FA8FBF00202402000E09
59858+:10126000106200788F8200240A00055F8FB3001C93
59859+:10127000106200D2286200391040000A2402008067
59860+:1012800024020036106200E528620037104000C3D7
59861+:1012900024020035106200D98FBF00200A00055FCC
59862+:1012A0008FB3001C1062002D2862008110400006E0
59863+:1012B000240200C824020039106200C98FBF002038
59864+:1012C0000A00055F8FB3001C106200A28FBF0020D0
59865+:1012D0000A00055F8FB3001C8F8200248C42000C33
59866+:1012E000104000D78FBF00200E000D8400000000CA
59867+:1012F0003C038000346301008C6200008F85002075
59868+:10130000946700089466000CACA200008C64000492
59869+:101310008F82002400063400ACA400049448001E10
59870+:101320008C62001800073C0000E83825ACA20008D9
59871+:101330008C62001C24040001ACA2000C9062000A24
59872+:1013400000C23025ACA60010ACA00014ACA0001860
59873+:10135000ACA7001C0A00051D8FBF00208F8200244F
59874+:101360008C42000C104000B68FBF00200E000D8490
59875+:10137000000000008F820024962400089625000CAF
59876+:101380009443001E000422029626000E8F82002045
59877+:10139000000426000083202500052C003C0300806B
59878+:1013A00000A6282500832025AC400000AC400004A6
59879+:1013B000AC400008AC40000CAC450010AC40001440
59880+:1013C000AC400018AC44001C0A00051C24040001B9
59881+:1013D0009622000C14400018000000009242000504
59882+:1013E0003042001014400014000000000E000332D0
59883+:1013F0000200202192420005020020213442001008
59884+:101400000E00033DA242000592420000240300208A
59885+:10141000304200FF10430089020020218FBF0020CE
59886+:101420008FB3001C8FB200188FB100148FB0001062
59887+:101430000A00107527BD00280000000D0A00055E97
59888+:101440008FBF00208C42000C1040007D8FBF002019
59889+:101450000E000D84000000008E2200048F84002006
59890+:101460009623000CAC8200003C0280089445002CBE
59891+:101470008F82002400031C0030A5FFFF9446001E4D
59892+:101480003C02400E0065182500C23025AC830004E4
59893+:10149000AC800008AC80000CAC800010AC80001464
59894+:1014A000AC800018AC86001C0A00051C2404000156
59895+:1014B0000E000332020020218F93FF9802002021AA
59896+:1014C0000E00033DA660000C020020210E00034226
59897+:1014D000240500018F8200248C42000C104000582B
59898+:1014E0008FBF00200E000D84000000009622000C2B
59899+:1014F0008F83002000021400AC700000AC62000476
59900+:10150000AC6000088E4400388F820024AC64000C6C
59901+:101510008E46003C9445001E3C02401FAC66001005
59902+:1015200000A228258E62000424040001AC6200148D
59903+:10153000AC600018AC65001C8FBF00208FB3001C8E
59904+:101540008FB200188FB100148FB000100A000DB8D0
59905+:1015500027BD0028240200201082003A8FB3001C0F
59906+:101560000E000F5E00000000104000358FBF00200D
59907+:101570003C0480008C8201F80440FFFE348201C0EC
59908+:1015800024030002AC500000A04300043C02100001
59909+:10159000AC8201F80A00055E8FBF00200200202106
59910+:1015A0008FBF00208FB3001C8FB200188FB10014C2
59911+:1015B0008FB000100A000EA727BD00289625000C4A
59912+:1015C000020020218FBF00208FB3001C8FB20018B3
59913+:1015D0008FB100148FB000100A000ECC27BD002878
59914+:1015E000020020218FB3001C8FB200188FB10014AD
59915+:1015F0008FB000100A000EF727BD00289225000DBD
59916+:10160000020020218FB3001C8FB200188FB100148C
59917+:101610008FB000100A000F4827BD002802002021CB
59918+:101620008FBF00208FB3001C8FB200188FB1001441
59919+:101630008FB000100A000F1F27BD00288FBF0020A9
59920+:101640008FB3001C8FB200188FB100148FB0001040
59921+:1016500003E0000827BD00283C0580008CA202782A
59922+:101660000440FFFE34A2024024030002AC44000008
59923+:10167000A04300043C02100003E00008ACA2027882
59924+:10168000A380001803E00008A38000193C03800039
59925+:101690008C6202780440FFFE8F82001CAC62024024
59926+:1016A00024020002A06202443C02100003E0000891
59927+:1016B000AC6202783C02600003E000088C425404F3
59928+:1016C0009083003024020005008040213063003FF9
59929+:1016D0000000482114620005000050219082004C57
59930+:1016E0009483004E304900FF306AFFFFAD00000CCC
59931+:1016F000AD000010AD000024950200148D05001C03
59932+:101700008D0400183042FFFF004910230002110031
59933+:10171000000237C3004038210086202300A2102B8E
59934+:101720000082202300A72823AD05001CAD0400186B
59935+:10173000A5090014A5090020A50A001603E0000869
59936+:10174000A50A002203E000080000000027BDFFD822
59937+:10175000AFB200183C128008AFB40020AFB3001C39
59938+:10176000AFB10014AFBF0024AFB00010365101007C
59939+:101770003C0260008C4254049222000C3C1408008D
59940+:10178000929400F7304300FF2402000110620032FF
59941+:101790000080982124020002146200353650008037
59942+:1017A0000E00143D000000009202004C2403FF8054
59943+:1017B0003C0480003042007F000211C024420240FD
59944+:1017C0000262102100431824AC8300949245000863
59945+:1017D0009204004C3042007F3C03800614850007D1
59946+:1017E000004380212402FFFFA22200112402FFFFF8
59947+:1017F000A62200120A0005D22402FFFF9602002052
59948+:10180000A222001196020022A62200128E020024BB
59949+:101810003C048008AE2200143485008090A2004C65
59950+:1018200034830100A06200108CA2003CAC6200185E
59951+:101830008C820068AC6200F48C820064AC6200F0C0
59952+:101840008C82006CAC6200F824020001A0A2006847
59953+:101850000A0005EE3C0480080E001456000000004B
59954+:1018600036420080A04000680A0005EE3C04800873
59955+:10187000A2000068A20000690A0006293C02800854
59956+:10188000348300808C62003834850100AC62006CC7
59957+:1018900024020001A062006990A200D59083000894
59958+:1018A000305100FF3072007F12320019001111C058
59959+:1018B00024420240026210212403FF8000431824C6
59960+:1018C0003C048000AC8300943042007F3C038006DF
59961+:1018D000004380218E02000C1040000D02002021E8
59962+:1018E0000E00057E0000000026220001305100FF9E
59963+:1018F0009203003C023410260002102B0002102339
59964+:101900003063007F022288240A0005F8A203003C0D
59965+:101910003C088008350401008C8200E03507008017
59966+:10192000ACE2003C8C8200E0AD02000090E5004C8F
59967+:10193000908600D590E3004C908400D52402FF806F
59968+:1019400000A228243063007F308400FF00A62825F1
59969+:101950000064182A1060000230A500FF38A500803E
59970+:10196000A0E5004CA10500093C0280089043000E50
59971+:10197000344400803C058000A043000A8C8300189A
59972+:101980003C027FFF3442FFFF00621824AC83001842
59973+:101990008CA201F80440FFFE00000000ACB301C0BF
59974+:1019A0008FBF00248FB400208FB3001C8FB20018AB
59975+:1019B0008FB100148FB0001024020002A0A201C455
59976+:1019C00027BD00283C02100003E00008ACA201F88B
59977+:1019D00090A2000024420001A0A200003C030800E5
59978+:1019E0008C6300F4304200FF144300020080302179
59979+:1019F000A0A0000090A200008F84001C000211C073
59980+:101A00002442024024830040008220212402FF80DF
59981+:101A1000008220243063007F3C02800A006218218B
59982+:101A20003C028000AC44002403E00008ACC300008A
59983+:101A300094820006908300058C85000C8C86001033
59984+:101A40008C8700188C88001C8C8400203C010800C6
59985+:101A5000A42256C63C010800A02356C53C0108003C
59986+:101A6000AC2556CC3C010800AC2656D03C01080001
59987+:101A7000AC2756D83C010800AC2856DC3C010800D5
59988+:101A8000AC2456E003E00008000000003C0280089F
59989+:101A9000344201008C4400343C038000346504006F
59990+:101AA000AC6400388C420038AF850028AC62003C42
59991+:101AB0003C020005AC6200300000000000000000A5
59992+:101AC00003E00008000000003C020006308400FF34
59993+:101AD000008220253C028000AC4400300000000061
59994+:101AE00000000000000000003C0380008C62000049
59995+:101AF000304200101040FFFD3462040003E0000893
59996+:101B0000AF82002894C200003C080800950800CA73
59997+:101B100030E7FFFF0080482101021021A4C200002D
59998+:101B200094C200003042FFFF00E2102B544000013D
59999+:101B3000A4C7000094A200003C0308008C6300CC02
60000+:101B400024420001A4A2000094A200003042FFFF42
60001+:101B5000144300073C0280080107102BA4A00000DA
60002+:101B60005440000101003821A4C700003C02800855
60003+:101B7000344601008CC3002894A200003C0480007D
60004+:101B80003042FFFE000210C000621021AC82003C17
60005+:101B90008C82003C006218231860000400000000E2
60006+:101BA0008CC200240A0006BA244200018CC2002420
60007+:101BB000AC8200383C020050344200103C038000EC
60008+:101BC000AC620030000000000000000000000000D7
60009+:101BD0008C620000304200201040FFFD0000000039
60010+:101BE00094A200003C04800030420001000210C0BA
60011+:101BF000004410218C430400AD2300008C420404F7
60012+:101C0000AD2200043C02002003E00008AC8200305A
60013+:101C100027BDFFE0AFB20018AFB10014AFB00010A5
60014+:101C2000AFBF001C94C2000000C080213C1208001D
60015+:101C3000965200C624420001A6020000960300004E
60016+:101C400094E2000000E03021144300058FB1003021
60017+:101C50000E00068F024038210A0006F10000000045
60018+:101C60008C8300048C82000424420040046100073D
60019+:101C7000AC8200048C8200040440000400000000D8
60020+:101C80008C82000024420001AC8200009602000019
60021+:101C90003042FFFF50520001A600000096220000D3
60022+:101CA00024420001A62200003C02800834420100C8
60023+:101CB000962300009442003C144300048FBF001C94
60024+:101CC00024020001A62200008FBF001C8FB2001862
60025+:101CD0008FB100148FB0001003E0000827BD002072
60026+:101CE00027BDFFE03C028008AFBF0018344201006E
60027+:101CF0008C4800343C03800034690400AC68003830
60028+:101D00008C42003830E700FFAF890028AC62003C0D
60029+:101D10003C020005AC620030000000000000000042
60030+:101D200000000000000000000000000000000000B3
60031+:101D30008C82000C8C82000C97830016AD22000070
60032+:101D40008C82001000604021AD2200048C820018BB
60033+:101D5000AD2200088C82001CAD22000C8CA2001465
60034+:101D6000AD2200108C820020AD220014908200056C
60035+:101D7000304200FF00021200AD2200188CA20018B1
60036+:101D8000AD22001C8CA2000CAD2200208CA2001001
60037+:101D9000AD2200248CA2001CAD2200288CA20020C1
60038+:101DA000AD22002C3402FFFFAD260030AD20003400
60039+:101DB000506200013408FFFFAD28003850E00011E8
60040+:101DC0003C0280083C048008348401009482005066
60041+:101DD0003042FFFFAD22003C9483004494850044D0
60042+:101DE000240200013063FFFF000318C200641821C1
60043+:101DF0009064006430A5000700A210040A00075C8C
60044+:101E00000044102534420100AD20003C94430044BE
60045+:101E1000944400443063FFFF000318C2006218219D
60046+:101E200030840007906500642402000100821004E1
60047+:101E30000002102700451024A0620064000000008A
60048+:101E400000000000000000003C0200063442004098
60049+:101E50003C038000AC620030000000000000000085
60050+:101E6000000000008C620000304200101040FFFDB6
60051+:101E70003C06800834C201503463040034C7014A70
60052+:101E800034C4013434C5014034C60144AFA200104B
60053+:101E90000E0006D2AF8300288FBF001803E00008B1
60054+:101EA00027BD00208F8300143C0608008CC600E884
60055+:101EB0008F82001C30633FFF000319800046102111
60056+:101EC000004310212403FF80004318243C068000B7
60057+:101ED000ACC300283042007F3C03800C004330211B
60058+:101EE00090C2000D30A500FF0000382134420010E0
60059+:101EF000A0C2000D8F8900143C028008344201000A
60060+:101F00009443004400091382304800032402000176
60061+:101F1000A4C3000E1102000B2902000210400005AC
60062+:101F2000240200021100000C240300010A0007A48F
60063+:101F30000000182111020006000000000A0007A49A
60064+:101F4000000018218CC2002C0A0007A424430001C1
60065+:101F50008CC20014244300018CC200180043102BD3
60066+:101F60005040000A240700012402002714A20003A5
60067+:101F70003C0380080A0007B1240700013463010014
60068+:101F80009462004C24420001A462004C00091382B8
60069+:101F9000304300032C620002104000090080282119
60070+:101FA000146000040000000094C200340A0007C15D
60071+:101FB0003046FFFF8CC600380A0007C10080282188
60072+:101FC000000030213C040800248456C00A000706A3
60073+:101FD0000000000027BDFF90AFB60068AFB50064F9
60074+:101FE000AFB40060AFB3005CAFB20058AFB1005403
60075+:101FF000AFBF006CAFB000508C9000000080B021EB
60076+:102000003C0208008C4200E8960400328F83001CDA
60077+:102010002414FF8030843FFF0062182100042180D7
60078+:1020200000641821007410243C13800000A090214B
60079+:1020300090A50000AE620028920400323C02800CA1
60080+:102040003063007F00628821308400C02402004099
60081+:10205000148200320000A8218E3500388E2200182C
60082+:102060001440000224020001AE2200189202003C3B
60083+:10207000304200201440000E8F83001C000511C068
60084+:102080002442024000621821306400783C02008043
60085+:102090000082202500741824AE630800AE64081086
60086+:1020A0008E2200188E03000800431021AE22001873
60087+:1020B0008E22002C8E230018244200010062182B6F
60088+:1020C0001060004300000000924200002442000122
60089+:1020D000A24200003C0308008C6300F4304200FF81
60090+:1020E00050430001A2400000924200008F84001C77
60091+:1020F000000211C024420240248300403063007F6C
60092+:10210000008220213C02800A0094202400621821D1
60093+:10211000AE6400240A0008D2AEC30000920300326D
60094+:102120002402FFC000431024304200FF1440000589
60095+:1021300024020001AE220018962200340A00084250
60096+:102140003055FFFF8E22001424420001AE220018F9
60097+:102150009202003000021600000216030441001C27
60098+:10216000000000009602003227A400100080282101
60099+:10217000A7A20016960200320000302124070001B9
60100+:102180003042FFFFAF8200140E000706AFA0001C14
60101+:10219000960200328F83001C3C0408008C8400E807
60102+:1021A00030423FFF000211800064182100621821B4
60103+:1021B00000741024AE62002C3063007F3C02800E5D
60104+:1021C000006218219062000D3042007FA062000D75
60105+:1021D0009222000D304200105040007892420000E0
60106+:1021E0003C028008344401009482004C8EC30000FD
60107+:1021F0003C130800967300C62442FFFFA482004CE3
60108+:10220000946200329623000E3054FFFF3070FFFFBF
60109+:102210003C0308008C6300D000701807A7A30038A7
60110+:102220009482003E3063FFFF3042FFFF14620007DC
60111+:10223000000000008C8200303C038000244200300B
60112+:10224000AC62003C0A00086A8C82002C9482004038
60113+:102250003042FFFF5462000927A400408C820038FE
60114+:102260003C03800024420030AC62003C8C8200348D
60115+:10227000AC6200380A0008793C03800027A50038CA
60116+:1022800027A60048026038210E00068FA7A000484C
60117+:102290008FA300403C02800024630030AC43003830
60118+:1022A0008FA30044AC43003C3C0380003C0200058B
60119+:1022B000AC6200303C028008344401009482004249
60120+:1022C000346304003042FFFF0202102B1440000769
60121+:1022D000AF8300289482004E9483004202021021B2
60122+:1022E000004310230A00088F3043FFFF9483004E01
60123+:1022F00094820042026318210050102300621823C8
60124+:102300003063FFFF3C028008344401009482003CAB
60125+:102310003042FFFF14430003000000000A00089F42
60126+:10232000240300019482003C3042FFFF0062102B26
60127+:10233000144000058F8200289482003C0062102324
60128+:102340003043FFFF8F820028AC550000AC400004F2
60129+:10235000AC540008AC43000C3C02000634420010B0
60130+:102360003C038000AC620030000000000000000070
60131+:10237000000000008C620000304200101040FFFDA1
60132+:102380003C04800834840100001018C20064182145
60133+:102390009065006432020007240600010046100424
60134+:1023A00000451025A0620064948300429622000E2E
60135+:1023B00050430001A386001892420000244200010D
60136+:1023C000A24200003C0308008C6300F4304200FF8E
60137+:1023D00050430001A2400000924200008F84001C84
60138+:1023E000000211C0244202402483004000822021C8
60139+:1023F0002402FF80008220243063007F3C02800A98
60140+:10240000006218213C028000AC440024AEC30000EE
60141+:102410008FBF006C8FB600688FB500648FB400600A
60142+:102420008FB3005C8FB200588FB100548FB0005052
60143+:1024300003E0000827BD007027BDFFD8AFB3001C24
60144+:10244000AFB20018AFB10014AFB00010AFBF0020A2
60145+:102450000080982100E0802130B1FFFF0E000D8444
60146+:1024600030D200FF0000000000000000000000006B
60147+:102470008F8200208F830024AC510000AC520004F6
60148+:10248000AC530008AC40000CAC400010AC40001451
60149+:10249000AC4000189463001E02038025AC50001C61
60150+:1024A0000000000000000000000000002404000103
60151+:1024B0008FBF00208FB3001C8FB200188FB10014A3
60152+:1024C0008FB000100A000DB827BD002830A5FFFF0F
60153+:1024D0000A0008DC30C600FF3C02800834430100DB
60154+:1024E0009462000E3C080800950800C63046FFFFC5
60155+:1024F00014C000043402FFFF946500EA0A000929B1
60156+:102500008F84001C10C20027000000009462004E5F
60157+:102510009464003C3045FFFF00A6102300A6182B52
60158+:102520003087FFFF106000043044FFFF00C5102318
60159+:1025300000E210233044FFFF0088102B1040000EF3
60160+:1025400000E810233C028008344401002403000109
60161+:1025500034420080A44300162402FFFFA482000E30
60162+:10256000948500EA8F84001C0000302130A5FFFF15
60163+:102570000A0009013C0760200044102A10400009AD
60164+:102580003C0280083443008094620016304200010F
60165+:10259000104000043C0280009442007E244200145B
60166+:1025A000A462001603E000080000000027BDFFE061
60167+:1025B0003C028008AFBF001CAFB0001834420100DD
60168+:1025C000944300429442004C104000193068FFFFD1
60169+:1025D0009383001824020001146200298FBF001C9D
60170+:1025E0003C06800834D00100000810C200501021C1
60171+:1025F000904200643103000734C70148304200FFB5
60172+:10260000006210073042000134C9014E34C4012C6D
60173+:1026100034C5013E1040001634C601420E0006D2F9
60174+:10262000AFA90010960200420A0009463048FFFF99
60175+:102630003C028008344401009483004494820042A8
60176+:102640001043000F8FBF001C94820044A4820042FC
60177+:1026500094820050A482004E8C820038AC820030FC
60178+:1026600094820040A482003E9482004AA4820048E2
60179+:102670008FBF001C8FB000180A00090427BD00207E
60180+:102680008FB0001803E0000827BD002027BDFFA081
60181+:10269000AFB1004C3C118000AFBF0058AFB3005445
60182+:1026A000AFB20050AFB000483626018890C2000398
60183+:1026B0003044007FA3A400108E32018090C200003D
60184+:1026C0003043007F240200031062003BAF92001CE5
60185+:1026D00028620004104000062402000424020002C4
60186+:1026E000106200098FBF00580A000B0F8FB300540F
60187+:1026F0001062004D240200051062014E8FBF005889
60188+:102700000A000B0F8FB30054000411C002421021C5
60189+:102710002404FF8024420240004410242643004049
60190+:10272000AE2200243063007F3C02800A0062182140
60191+:102730009062003CAFA3003C00441025A062003C26
60192+:102740008FA3003C9062003C304200401040016C7E
60193+:102750008FBF00583C108008A3800018361001007D
60194+:102760008E0200E08C63003427A4003C27A50010F3
60195+:10277000004310210E0007C3AE0200E093A2001038
60196+:102780003C038000A20200D58C6202780440FFFE68
60197+:102790008F82001CAC62024024020002A06202444C
60198+:1027A0003C021000AC6202780E0009390000000003
60199+:1027B0000A000B0E8FBF00583C05800890C3000133
60200+:1027C00090A2000B1443014E8FBF005834A4008028
60201+:1027D0008C8200189082004C90A200083C0260009D
60202+:1027E0008C4254048C8300183C027FFF3442FFFF6C
60203+:1027F000006218243C0208008C4200B4AC8300182C
60204+:102800003C038000244200013C010800AC2200B4DB
60205+:102810008C6201F80440FFFE8F82001CAC6201C094
60206+:102820000A000AD6240200023C10800890C300016E
60207+:102830009202000B144301328FBF005827A40018E6
60208+:1028400036050110240600033C0260008C4254044B
60209+:102850000E000E470000000027A40028360501F0F6
60210+:102860000E000E47240600038FA200283603010045
60211+:10287000AE0200648FA2002CAE0200688FA200306E
60212+:10288000AE02006C93A40018906300D52402FF8070
60213+:102890000082102400431025304900FF3084007F5F
60214+:1028A0003122007F0082102A544000013929008023
60215+:1028B000000411C0244202402403FF800242102180
60216+:1028C00000431024AE220094264200403042007F94
60217+:1028D0003C038006004340218FA3001C2402FFFF1D
60218+:1028E000AFA800403C130800927300F71062003359
60219+:1028F00093A2001995030014304400FF3063FFFFDA
60220+:102900000064182B106000100000000095040014F3
60221+:102910008D07001C8D0600183084FFFF0044202323
60222+:102920000004210000E438210000102100E4202BE5
60223+:1029300000C2302100C43021AD07001CAD060018D4
60224+:102940000A000A2F93A20019950400148D07001C99
60225+:102950008D0600183084FFFF008220230004210030
60226+:10296000000010210080182100C2302300E4202B39
60227+:1029700000C4302300E33823AD07001CAD06001867
60228+:1029800093A200198FA30040A462001497A2001A1A
60229+:10299000A46200168FA2001CAC6200108FA2001C63
60230+:1029A000AC62000C93A20019A462002097A2001A46
60231+:1029B000A46200228FA2001CAC6200243C048008A8
60232+:1029C000348300808C6200388FA20020012088218F
60233+:1029D000AC62003C8FA20020AC82000093A20018E1
60234+:1029E000A062004C93A20018A0820009A0600068B9
60235+:1029F00093A20018105100512407FF803229007F54
60236+:102A0000000911C024420240024210213046007FDA
60237+:102A10003C03800000471024AC6200943C02800616
60238+:102A200000C2302190C2003CAFA60040000020212F
60239+:102A300000471025A0C2003C8FA80040950200026C
60240+:102A4000950300148D07001C3042FFFF3063FFFF29
60241+:102A50008D060018004310230002110000E2382107
60242+:102A600000E2102B00C4302100C23021AD07001C51
60243+:102A7000AD06001895020002A5020014A50000167C
60244+:102A80008D020008AD0200108D020008AD02000C9E
60245+:102A900095020002A5020020A50000228D02000878
60246+:102AA000AD0200249102003C304200401040001A68
60247+:102AB000262200013C108008A3A90038A38000183A
60248+:102AC000361001008E0200E08D03003427A4004080
60249+:102AD00027A50038004310210E0007C3AE0200E016
60250+:102AE00093A200383C038000A20200D58C620278D9
60251+:102AF0000440FFFE8F82001CAC62024024020002F0
60252+:102B0000A06202443C021000AC6202780E00093957
60253+:102B100000000000262200013043007F14730004EF
60254+:102B2000004020212403FF8002231024004320269C
60255+:102B300093A200180A000A4B309100FF93A40018DA
60256+:102B40008FA3001C2402FFFF1062000A308900FFDF
60257+:102B500024820001248300013042007F14530005C9
60258+:102B6000306900FF2403FF800083102400431026F7
60259+:102B7000304900FF3C028008904200080120882173
60260+:102B8000305000FF123000193222007F000211C0C5
60261+:102B900002421021244202402403FF8000431824F3
60262+:102BA0003C048000AC8300943042007F3C038006EC
60263+:102BB000004310218C43000C004020211060000BCA
60264+:102BC000AFA200400E00057E000000002623000199
60265+:102BD0002405FF803062007F145300020225202468
60266+:102BE000008518260A000AAF307100FF3C048008F7
60267+:102BF000348400808C8300183C027FFF3442FFFF46
60268+:102C000000621824AC8300183C0380008C6201F839
60269+:102C10000440FFFE00000000AC7201C0240200026C
60270+:102C2000A06201C43C021000AC6201F80A000B0E65
60271+:102C30008FBF00583C04800890C300019082000BB5
60272+:102C40001443002F8FBF0058349000809202000878
60273+:102C500030420040104000200000000092020008B6
60274+:102C60000002160000021603044100050240202164
60275+:102C70000E000ECC240500930A000B0E8FBF0058E7
60276+:102C80009202000924030018304200FF1443000D93
60277+:102C900002402021240500390E000E64000030217E
60278+:102CA0000E0003328F84001C8F82FF9424030012D5
60279+:102CB000A04300090E00033D8F84001C0A000B0E88
60280+:102CC0008FBF0058240500360E000E64000030212E
60281+:102CD0000A000B0E8FBF00580E0003320240202165
60282+:102CE000920200058F84001C344200200E00033D38
60283+:102CF000A20200050E0010758F84001C8FBF0058C3
60284+:102D00008FB300548FB200508FB1004C8FB0004889
60285+:102D100003E0000827BD00603C0280083445010044
60286+:102D20003C0280008C42014094A3000E0000302140
60287+:102D300000402021AF82001C3063FFFF3402FFFF00
60288+:102D4000106200063C0760202402FFFFA4A2000ED0
60289+:102D500094A500EA0A00090130A5FFFF03E000087E
60290+:102D60000000000027BDFFC83C0280003C06800830
60291+:102D7000AFB5002CAFB1001CAFBF0030AFB400281E
60292+:102D8000AFB30024AFB20020AFB00018345101003F
60293+:102D900034C501008C4301008E2200148CA400E491
60294+:102DA0000000A821AF83001C0044102318400052EB
60295+:102DB000A38000188E22001400005021ACA200E471
60296+:102DC00090C3000890A200D53073007FA3A200102A
60297+:102DD0008CB200E08CB400E4304200FF1053003BA2
60298+:102DE00093A200108F83001C2407FF80000211C0F3
60299+:102DF0000062102124420240246300400047102456
60300+:102E00003063007F3C0980003C08800A006818217C
60301+:102E1000AD2200248C62003427A4001427A50010E2
60302+:102E2000024280210290102304400028AFA3001426
60303+:102E30009062003C00E21024304200FF1440001970
60304+:102E4000020090219062003C34420040A062003CAD
60305+:102E50008F86001C93A3001024C200403042007FE4
60306+:102E6000004828213C0208008C4200F42463000141
60307+:102E7000306400FF14820002A3A30010A3A000107E
60308+:102E800093A20010AFA50014000211C0244202401A
60309+:102E900000C2102100471024AD2200240A000B4577
60310+:102EA00093A200100E0007C3000000003C0280083F
60311+:102EB00034420100AC5000E093A30010240A00014A
60312+:102EC000A04300D50A000B4593A200102402000184
60313+:102ED000154200093C0380008C6202780440FFFE2A
60314+:102EE0008F82001CAC62024024020002A0620244F5
60315+:102EF0003C021000AC6202789222000B2403000214
60316+:102F0000304200FF144300720000000096220008C7
60317+:102F1000304300FF24020082146200402402008437
60318+:102F20003C028000344901008D22000C95230006EC
60319+:102F3000000216023063FFFF3045003F24020027E5
60320+:102F400010A2000FAF83001428A200281040000830
60321+:102F5000240200312402002110A2000924020025CD
60322+:102F600010A20007938200190A000BBD00000000A8
60323+:102F700010A20007938200190A000BBD0000000098
60324+:102F80000E000777012020210A000C3D0000000000
60325+:102F90003C0380008C6202780440FFFE8F82001C9C
60326+:102FA000AC62024024020002A06202443C02100013
60327+:102FB000AC6202780A000C3D000000009523000678
60328+:102FC000912400058D25000C8D2600108D270018FA
60329+:102FD0008D28001C8D290020244200013C0108009E
60330+:102FE000A42356C63C010800A02456C53C01080095
60331+:102FF000AC2556CC3C010800AC2656D03C0108005C
60332+:10300000AC2756D83C010800AC2856DC3C0108002F
60333+:10301000AC2956E00A000C3DA38200191462000A94
60334+:10302000240200813C02800834420100944500EAF9
60335+:10303000922600058F84001C30A5FFFF30C600FFDC
60336+:103040000A000BFE3C0760211462005C00000000D7
60337+:103050009222000A304300FF306200201040000737
60338+:10306000306200403C02800834420100944500EA8E
60339+:103070008F84001C0A000BFC24060040104000074F
60340+:10308000000316003C02800834420100944500EA27
60341+:103090008F84001C0A000BFC24060041000216036A
60342+:1030A000044100463C02800834420100944500EA95
60343+:1030B0008F84001C2406004230A5FFFF3C076019E6
60344+:1030C0000E000901000000000A000C3D0000000095
60345+:1030D0009222000B24040016304200FF1044000628
60346+:1030E0003C0680009222000B24030017304200FFB0
60347+:1030F000144300320000000034C5010090A2000B10
60348+:10310000304200FF1444000B000080218CA20020FC
60349+:103110008CA400202403FF800043102400021140EF
60350+:103120003084007F004410253C032000004310251C
60351+:10313000ACC2083094A2000800021400000214037C
60352+:10314000044200012410000194A2000830420080D3
60353+:103150005040001A0200A82194A20008304220002A
60354+:10316000504000160200A8218CA300183C021C2D20
60355+:10317000344219ED106200110200A8213C0208003F
60356+:103180008C4200D4104000053C0280082403000457
60357+:1031900034420100A04300FC3C028008344201009C
60358+:1031A000944500EA8F84001C2406000630A5FFFF2A
60359+:1031B0000E0009013C0760210200A8210E00093918
60360+:1031C000000000009222000A304200081040000473
60361+:1031D00002A010210E0013790000000002A01021AF
60362+:1031E0008FBF00308FB5002C8FB400288FB3002420
60363+:1031F0008FB200208FB1001C8FB0001803E00008D0
60364+:1032000027BD00382402FF80008220243C02900069
60365+:1032100034420007008220253C028000AC4400209C
60366+:103220003C0380008C6200200440FFFE0000000090
60367+:1032300003E00008000000003C0380002402FF803F
60368+:10324000008220243462000700822025AC64002024
60369+:103250008C6200200440FFFE0000000003E0000834
60370+:103260000000000027BDFFD8AFB3001CAFB10014B1
60371+:10327000AFB00010AFBF0020AFB200183C1180000B
60372+:103280003C0280088E32002034530100AE2400201E
60373+:10329000966300EA000514003C074000004738250B
60374+:1032A00000A08021000030210E0009013065FFFFE1
60375+:1032B000240200A1160200022402FFFFA2620009FC
60376+:1032C000AE3200208FBF00208FB3001C8FB20018D9
60377+:1032D0008FB100148FB0001003E0000827BD002854
60378+:1032E0003C0280082403000527BDFFE834420100AA
60379+:1032F000A04300FCAFBF00103C0280008C420100E4
60380+:10330000240500A1004020210E000C67AF82001CA4
60381+:103310003C0380008C6202780440FFFE8F82001C18
60382+:103320008FBF001027BD0018AC62024024020002CB
60383+:10333000A06202443C021000AC62027803E0000884
60384+:103340000000000027BDFFE83C068000AFBF001072
60385+:1033500034C7010094E20008304400FF3883008243
60386+:10336000388200842C6300012C4200010062182581
60387+:103370001060002D24020083938200195040003B0E
60388+:103380008FBF00103C020800904256CC8CC4010054
60389+:103390003C06080094C656C63045003F38A30032AC
60390+:1033A00038A2003F2C6300012C4200010062182566
60391+:1033B000AF84001CAF860014A380001914600007BE
60392+:1033C00000E020212402002014A2001200000000CE
60393+:1033D0003402FFFF14C2000F00000000240200208E
60394+:1033E00014A2000500E028218CE300142402FFFF52
60395+:1033F0005062000B8FBF00103C040800248456C0AC
60396+:10340000000030210E000706240700010A000CD638
60397+:103410008FBF00100E000777000000008FBF001064
60398+:103420000A00093927BD001814820004240200850F
60399+:103430008CC501040A000CE1000020211482000662
60400+:103440002482FF808CC50104240440008FBF00103B
60401+:103450000A00016727BD0018304200FF2C4200021D
60402+:1034600010400004240200228FBF00100A000B2726
60403+:1034700027BD0018148200048F8200248FBF001023
60404+:103480000A000C8627BD00188C42000C1040001E5C
60405+:1034900000E0282190E300092402001814620003D0
60406+:1034A000240200160A000CFC240300081462000722
60407+:1034B00024020017240300123C02800834420080DA
60408+:1034C000A04300090A000D0994A7000854620007F0
60409+:1034D00094A700088F82FF942404FFFE9043000508
60410+:1034E00000641824A043000594A7000890A6001BC0
60411+:1034F0008CA4000094A500068FBF001000073C00BC
60412+:103500000A0008DC27BD00188FBF001003E0000888
60413+:1035100027BD00188F8500243C04800094A2002A57
60414+:103520008CA30034000230C02402FFF000C210243B
60415+:1035300000621821AC83003C8CA200303C03800068
60416+:10354000AC8200383C02005034420010AC620030C3
60417+:103550000000000000000000000000008C6200007D
60418+:10356000304200201040FFFD30C20008104000062D
60419+:103570003C0280008C620408ACA200208C62040C27
60420+:103580000A000D34ACA200248C430400ACA300203C
60421+:103590008C420404ACA200243C0300203C028000C6
60422+:1035A000AC4300303C0480008C8200300043102487
60423+:1035B0001440FFFD8F8600243C020040AC820030A6
60424+:1035C00094C3002A94C2002894C4002C94C5002EF1
60425+:1035D00024630001004410213064FFFFA4C20028CE
60426+:1035E00014850002A4C3002AA4C0002A03E0000836
60427+:1035F000000000008F84002427BDFFE83C05800404
60428+:1036000024840010AFBF00100E000E472406000AED
60429+:103610008F840024948200129483002E3042000F85
60430+:10362000244200030043180424027FFF0043102BB0
60431+:1036300010400002AC8300000000000D0E000D13CE
60432+:10364000000000008F8300248FBF001027BD0018EA
60433+:10365000946200149463001A3042000F00021500B7
60434+:10366000006218253C02800003E00008AC4300A083
60435+:103670008F8300243C028004944400069462001A64
60436+:103680008C650000A4640016004410233042FFFF44
60437+:103690000045102B03E00008384200018F8400240D
60438+:1036A0003C0780049486001A8C85000094E2000692
60439+:1036B000A482001694E3000600C310233042FFFFEB
60440+:1036C0000045102B384200011440FFF8A483001677
60441+:1036D00003E00008000000008F8400243C02800406
60442+:1036E000944200069483001A8C850000A482001680
60443+:1036F000006210233042FFFF0045102B38420001CA
60444+:103700005040000D8F850024006030213C0780046C
60445+:1037100094E20006A482001694E3000600C310237E
60446+:103720003042FFFF0045102B384200011440FFF8E3
60447+:10373000A48300168F8500243C03800034620400BB
60448+:103740008CA40020AF820020AC6400388CA200243E
60449+:10375000AC62003C3C020005AC62003003E00008B3
60450+:10376000ACA000048F8400243C0300068C8200047B
60451+:1037700000021140004310253C038000AC62003081
60452+:103780000000000000000000000000008C6200004B
60453+:10379000304200101040FFFD34620400AC80000491
60454+:1037A00003E00008AF8200208F86002427BDFFE0E1
60455+:1037B000AFB10014AFB00010AFBF00188CC300044D
60456+:1037C0008CC500248F820020309000FF94C4001A22
60457+:1037D00024630001244200202484000124A7002047
60458+:1037E000ACC30004AF820020A4C4001AACC70024FC
60459+:1037F00004A100060000882104E2000594C2001A1A
60460+:103800008CC2002024420001ACC2002094C2001AE5
60461+:1038100094C300282E040001004310262C4200010E
60462+:10382000004410245040000594C2001A24020001F4
60463+:10383000ACC2000894C2001A94C300280010202BC8
60464+:10384000004310262C4200010044102514400007BC
60465+:10385000000000008CC20008144000042402001084
60466+:103860008CC300041462000F8F8500240E000DA786
60467+:10387000241100018F820024944300289442001AEE
60468+:1038800014430003000000000E000D1300000000B0
60469+:10389000160000048F8500240E000D840000000037
60470+:1038A0008F85002494A2001E94A4001C24420001D1
60471+:1038B0003043FFFF14640002A4A2001EA4A0001E57
60472+:1038C0001200000A3C02800494A2001494A3001A7F
60473+:1038D0003042000F00021500006218253C028000F3
60474+:1038E000AC4300A00A000E1EACA0000894420006E3
60475+:1038F00094A3001A8CA40000A4A200160062102356
60476+:103900003042FFFF0044102B384200011040000DF0
60477+:1039100002201021006030213C07800494E2000660
60478+:10392000A4A2001694E3000600C310233042FFFF58
60479+:103930000044102B384200011440FFF8A4A30016E5
60480+:10394000022010218FBF00188FB100148FB000101B
60481+:1039500003E0000827BD002003E00008000000008D
60482+:103960008F82002C3C03000600021140004310250A
60483+:103970003C038000AC62003000000000000000004A
60484+:10398000000000008C620000304200101040FFFD7B
60485+:1039900034620400AF82002803E00008AF80002CEE
60486+:1039A00003E000080000102103E000080000000010
60487+:1039B0003084FFFF30A5FFFF0000182110800007B2
60488+:1039C000000000003082000110400002000420428C
60489+:1039D000006518210A000E3D0005284003E000089C
60490+:1039E0000060102110C0000624C6FFFF8CA200005A
60491+:1039F00024A50004AC8200000A000E4724840004C1
60492+:103A000003E000080000000010A0000824A3FFFF4E
60493+:103A1000AC86000000000000000000002402FFFF50
60494+:103A20002463FFFF1462FFFA2484000403E000080B
60495+:103A3000000000003C0280083442008024030001A2
60496+:103A4000AC43000CA4430010A4430012A443001490
60497+:103A500003E00008A44300168F82002427BDFFD88E
60498+:103A6000AFB3001CAFB20018AFB10014AFB000107C
60499+:103A7000AFBF00208C47000C248200802409FF8007
60500+:103A80003C08800E3043007F008080213C0A80008B
60501+:103A9000004920240068182130B100FF30D200FF17
60502+:103AA00010E000290000982126020100AD44002CFE
60503+:103AB000004928243042007F004820219062000005
60504+:103AC00024030050304200FF1443000400000000B3
60505+:103AD000AD45002C948200EA3053FFFF0E000D84A8
60506+:103AE000000000008F8200248F83002000112C0032
60507+:103AF0009442001E001224003484000100A22825F4
60508+:103B00003C02400000A22825AC7000008FBF0020BE
60509+:103B1000AC6000048FB20018AC7300088FB10014C1
60510+:103B2000AC60000C8FB3001CAC6400108FB00010B0
60511+:103B3000AC60001424040001AC60001827BD00280C
60512+:103B40000A000DB8AC65001C8FBF00208FB3001CAD
60513+:103B50008FB200188FB100148FB0001003E000087E
60514+:103B600027BD00283C06800034C201009043000FAE
60515+:103B7000240200101062000E2865001110A000073A
60516+:103B800024020012240200082405003A10620006F4
60517+:103B90000000302103E0000800000000240500358B
60518+:103BA0001462FFFC000030210A000E6400000000D7
60519+:103BB0008CC200748F83FF9424420FA003E000089E
60520+:103BC000AC62000C27BDFFE8AFBF00100E0003423F
60521+:103BD000240500013C0480088FBF0010240200016E
60522+:103BE00034830080A462001227BD00182402000163
60523+:103BF00003E00008A080001A27BDFFE0AFB2001864
60524+:103C0000AFB10014AFB00010AFBF001C30B2FFFF67
60525+:103C10000E000332008088213C028008345000806E
60526+:103C20009202000924030004304200FF1443000CF8
60527+:103C30003C028008124000082402000A0E000E5BBD
60528+:103C400000000000920200052403FFFE0043102440
60529+:103C5000A202000524020012A20200093C02800810
60530+:103C600034420080022020210E00033DA0400027A6
60531+:103C700016400003022020210E000EBF00000000AD
60532+:103C800002202021324600FF8FBF001C8FB2001897
60533+:103C90008FB100148FB00010240500380A000E64A4
60534+:103CA00027BD002027BDFFE0AFBF001CAFB200184A
60535+:103CB000AFB10014AFB000100E00033200808021BD
60536+:103CC0000E000E5B000000003C02800834450080BE
60537+:103CD00090A2000924120018305100FF1232000394
60538+:103CE0000200202124020012A0A2000990A20005D7
60539+:103CF0002403FFFE004310240E00033DA0A2000594
60540+:103D00000200202124050020163200070000302187
60541+:103D10008FBF001C8FB200188FB100148FB000103D
60542+:103D20000A00034227BD00208FBF001C8FB200187D
60543+:103D30008FB100148FB00010240500390A000E6402
60544+:103D400027BD002027BDFFE83C028000AFB0001077
60545+:103D5000AFBF0014344201009442000C2405003629
60546+:103D60000080802114400012304600FF0E00033214
60547+:103D7000000000003C02800834420080240300124E
60548+:103D8000A043000990430005346300100E000E5B51
60549+:103D9000A04300050E00033D020020210200202167
60550+:103DA0000E000342240500200A000F3C0000000022
60551+:103DB0000E000E64000000000E00033202002021FD
60552+:103DC0003C0280089043001B2405FF9F0200202135
60553+:103DD000006518248FBF00148FB00010A043001B93
60554+:103DE0000A00033D27BD001827BDFFE0AFBF001844
60555+:103DF000AFB10014AFB0001030B100FF0E000332BD
60556+:103E0000008080213C02800824030012344200809C
60557+:103E10000E000E5BA04300090E00033D02002021AE
60558+:103E200002002021022030218FBF00188FB1001422
60559+:103E30008FB00010240500350A000E6427BD002055
60560+:103E40003C0480089083000E9082000A1443000B0B
60561+:103E5000000028218F82FF942403005024050001D4
60562+:103E600090420000304200FF1443000400000000B4
60563+:103E70009082000E24420001A082000E03E00008A0
60564+:103E800000A010213C0380008C6201F80440FFFE7A
60565+:103E900024020002AC6401C0A06201C43C02100014
60566+:103EA00003E00008AC6201F827BDFFE0AFB20018E4
60567+:103EB0003C128008AFB10014AFBF001CAFB00010BF
60568+:103EC00036510080922200092403000A304200FF8C
60569+:103ED0001443003E000000008E4300048E22003890
60570+:103EE000506200808FBF001C92220000240300500B
60571+:103EF000304200FF144300253C0280008C42014008
60572+:103F00008E4300043642010002202821AC43001CED
60573+:103F10009622005C8E2300383042FFFF00021040E2
60574+:103F200000621821AE23001C8E4300048E2400384A
60575+:103F30009622005C006418233042FFFF0003184300
60576+:103F4000000210400043102A10400006000000004C
60577+:103F50008E4200048E230038004310230A000FAA6B
60578+:103F6000000220439622005C3042FFFF0002204006
60579+:103F70003C0280083443010034420080ACA4002C91
60580+:103F8000A040002424020001A062000C0E000F5E7D
60581+:103F900000000000104000538FBF001C3C02800056
60582+:103FA0008C4401403C0380008C6201F80440FFFE19
60583+:103FB00024020002AC6401C0A06201C43C021000F3
60584+:103FC000AC6201F80A0010078FBF001C92220009A2
60585+:103FD00024030010304200FF144300043C02800020
60586+:103FE0008C4401400A000FEE0000282192220009B3
60587+:103FF00024030016304200FF14430006240200147C
60588+:10400000A22200093C0280008C4401400A001001F9
60589+:104010008FBF001C8E2200388E23003C00431023EB
60590+:10402000044100308FBF001C92220027244200016F
60591+:10403000A2220027922200272C42000414400016DE
60592+:104040003C1080009222000924030004304200FF4B
60593+:10405000144300093C0280008C4401408FBF001CC7
60594+:104060008FB200188FB100148FB000102405009398
60595+:104070000A000ECC27BD00208C440140240500938B
60596+:104080008FBF001C8FB200188FB100148FB00010CA
60597+:104090000A000F4827BD00208E0401400E000332A5
60598+:1040A000000000008E4200042442FFFFAE420004E4
60599+:1040B0008E22003C2442FFFFAE22003C0E00033D56
60600+:1040C0008E0401408E0401408FBF001C8FB2001887
60601+:1040D0008FB100148FB00010240500040A000342C1
60602+:1040E00027BD00208FB200188FB100148FB00010D0
60603+:1040F00003E0000827BD00203C0680008CC2018838
60604+:104100003C038008346500809063000E00021402B6
60605+:10411000304400FF306300FF1464000E3C0280084E
60606+:1041200090A20026304200FF104400098F82FF94C5
60607+:10413000A0A400262403005090420000304200FF5B
60608+:1041400014430006000000000A0005A18CC4018091
60609+:104150003C02800834420080A044002603E00008AE
60610+:104160000000000027BDFFE030E700FFAFB20018FD
60611+:10417000AFBF001CAFB10014AFB0001000809021A1
60612+:1041800014E0000630C600FF000000000000000D33
60613+:10419000000000000A001060240001163C038008A3
60614+:1041A0009062000E304200FF14460023346200800B
60615+:1041B00090420026304200FF1446001F000000001D
60616+:1041C0009062000F304200FF1446001B0000000008
60617+:1041D0009062000A304200FF144600038F90FF9463
60618+:1041E0000000000D8F90FF948F82FF983C1180009B
60619+:1041F000AE05003CAC450000A066000A0E0003328C
60620+:104200008E240100A20000240E00033D8E24010034
60621+:104210003C0380008C6201F80440FFFE240200028F
60622+:10422000AC7201C0A06201C43C021000AC6201F893
60623+:104230000A0010618FBF001C000000000000000D8C
60624+:10424000000000002400013F8FBF001C8FB2001847
60625+:104250008FB100148FB0001003E0000827BD0020CC
60626+:104260008F83FF943C0280008C44010034420100A3
60627+:104270008C65003C9046001B0A00102724070001B3
60628+:104280003C0280089043000E9042000A0043102632
60629+:10429000304200FF03E000080002102B27BDFFE0C2
60630+:1042A0003C028008AFB10014AFB00010AFBF0018DF
60631+:1042B0003450008092020005240300303042003068
60632+:1042C00014430085008088218F8200248C42000CDA
60633+:1042D000104000828FBF00180E000D840000000007
60634+:1042E0008F860020ACD100009202000892030009E2
60635+:1042F000304200FF00021200306300FF004310252F
60636+:10430000ACC200049202004D000216000002160327
60637+:1043100004410005000000003C0308008C630048D5
60638+:104320000A00109F3C1080089202000830420040B2
60639+:10433000144000030000182192020027304300FFC0
60640+:104340003C108008361100809222004D00031E00B0
60641+:10435000304200FF0002140000621825ACC30008C0
60642+:104360008E2400308F820024ACC4000C8E250034D3
60643+:104370009443001E3C02C00BACC50010006218251F
60644+:104380008E22003800002021ACC200148E22003C96
60645+:10439000ACC200180E000DB8ACC3001C8E020004A5
60646+:1043A0008F8400203C058000AC8200008E2200201B
60647+:1043B000AC8200048E22001CAC8200088E220058C1
60648+:1043C0008CA3007400431021AC82000C8E22002CC0
60649+:1043D000AC8200108E2200408E23004400021400A4
60650+:1043E00000431025AC8200149222004D240300806B
60651+:1043F000304200FF1443000400000000AC800018AD
60652+:104400000A0010E38F8200248E23000C2402000196
60653+:104410001062000E2402FFFF92220008304200408A
60654+:104420001440000A2402FFFF8E23000C8CA20074AB
60655+:10443000006218233C0208000062102414400002AD
60656+:10444000000028210060282100051043AC820018DC
60657+:104450008F820024000020219443001E3C02C00CE7
60658+:10446000006218258F8200200E000DB8AC43001C9E
60659+:104470003C038008346201008C4200008F850020DC
60660+:10448000346300808FBF0018ACA20000ACA0000411
60661+:104490008C6400488F8200248FB10014ACA4000803
60662+:1044A000ACA0000CACA00010906300059446001E68
60663+:1044B0003C02400D00031E0000C23025ACA30014D6
60664+:1044C0008FB00010ACA0001824040001ACA6001CA2
60665+:1044D0000A000DB827BD00208FBF00188FB100144F
60666+:1044E0008FB0001003E0000827BD00203C028000D0
60667+:1044F0009443007C3C02800834460100308400FF75
60668+:104500003065FFFF2402000524A34650A0C4000C20
60669+:104510005482000C3065FFFF90C2000D2C42000752
60670+:104520001040000724A30A0090C3000D24020014C9
60671+:104530000062100400A210210A00111F3045FFFF85
60672+:104540003065FFFF3C0280083442008003E0000831
60673+:10455000A44500143C03800834680080AD05003891
60674+:10456000346701008CE2001C308400FF00A210239D
60675+:104570001840000330C600FF24A2FFFCACE2001C80
60676+:1045800030820001504000083C0380088D02003C4E
60677+:1045900000A2102304410012240400058C620004D0
60678+:1045A00010A2000F3C0380088C62000414A2001EBD
60679+:1045B000000000003C0208008C4200D8304200207D
60680+:1045C000104000093C0280083462008090630008BB
60681+:1045D0009042004C144300043C0280082404000470
60682+:1045E0000A00110900000000344300803442010039
60683+:1045F000A040000C24020001A462001410C0000AB4
60684+:104600003C0280008C4401003C0380008C6201F875
60685+:104610000440FFFE24020002AC6401C0A06201C499
60686+:104620003C021000AC6201F803E00008000000004A
60687+:1046300027BDFFE800A61823AFBF00101860008058
60688+:10464000308800FF3C02800834470080A0E000244E
60689+:1046500034440100A0E000278C82001C00A210233B
60690+:1046600004400056000000008CE2003C94E3005C33
60691+:104670008CE4002C004530233063FFFF00C3182179
60692+:104680000083202B1080000400E018218CE2002C15
60693+:104690000A00117800A2102194E2005C3042FFFF72
60694+:1046A00000C2102100A21021AC62001C3C02800854
60695+:1046B000344400809482005C8C83001C3042FFFFF5
60696+:1046C0000002104000A210210043102B10400004F3
60697+:1046D000000000008C82001C0A00118B3C06800840
60698+:1046E0009482005C3042FFFF0002104000A21021C3
60699+:1046F0003C06800834C3010034C70080AC82001C33
60700+:10470000A060000CACE500388C62001C00A21023F5
60701+:104710001840000224A2FFFCAC62001C3102000120
60702+:10472000104000083C0380088CE2003C00A21023EB
60703+:1047300004410012240400058CC2000410A20010E1
60704+:104740008FBF00108C62000414A2004F8FBF0010B6
60705+:104750003C0208008C4200D8304200201040000A81
60706+:104760003C02800834620080906300089042004C54
60707+:10477000144300053C028008240400048FBF00108D
60708+:104780000A00110927BD001834430080344201009B
60709+:10479000A040000C24020001A46200143C0280002E
60710+:1047A0008C4401003C0380008C6201F80440FFFE51
60711+:1047B000240200020A0011D8000000008CE2001C54
60712+:1047C000004610230043102B54400001ACE5001CB0
60713+:1047D00094E2005C3042FFFF0062102B144000079F
60714+:1047E0002402000294E2005C8CE3001C3042FFFFD4
60715+:1047F00000621821ACE3001C24020002ACE5003882
60716+:104800000E000F5EA082000C1040001F8FBF001032
60717+:104810003C0280008C4401003C0380008C6201F863
60718+:104820000440FFFE24020002AC6401C0A06201C487
60719+:104830003C021000AC6201F80A0011F08FBF0010BA
60720+:1048400031020010104000108FBF00103C028008A1
60721+:10485000344500808CA3001C94A2005C00661823E1
60722+:104860003042FFFF006218213C023FFF3444FFFF4B
60723+:104870000083102B544000010080182100C3102138
60724+:10488000ACA2001C8FBF001003E0000827BD001879
60725+:1048900027BDFFE800C0402100A63023AFBF0010B5
60726+:1048A00018C00026308A00FF3C028008344900808E
60727+:1048B0008D24001C8D23002C008820230064182BDD
60728+:1048C0001060000F344701008CE2002000461021E8
60729+:1048D000ACE200208CE200200044102B1440000BBE
60730+:1048E0003C023FFF8CE2002000441023ACE2002099
60731+:1048F0009522005C3042FFFF0A0012100082202146
60732+:10490000ACE00020008620213C023FFF3443FFFF43
60733+:104910000064102B54400001006020213C028008FC
60734+:104920003442008000851821AC43001CA0400024C4
60735+:10493000A04000270A0012623C03800831420010A8
60736+:10494000104000433C0380083C06800834C40080CB
60737+:104950008C82003C004810235840003E34660080A2
60738+:104960009082002424420001A0820024908200242E
60739+:104970003C0308008C630024304200FF0043102BEE
60740+:10498000144000688FBF001034C201008C42001C2C
60741+:1049900000A2102318400063000000008CC3000434
60742+:1049A0009482005C006818233042FFFF0003184324
60743+:1049B000000210400043102A1040000500000000D3
60744+:1049C0008CC20004004810230A0012450002104364
60745+:1049D0009482005C3042FFFF000210403C068008D9
60746+:1049E000AC82002C34C5008094A2005C8CA4002C06
60747+:1049F00094A3005C3042FFFF00021040008220219F
60748+:104A00003063FFFF0083202101041021ACA2001CB1
60749+:104A10008CC2000434C60100ACC2001C2402000297
60750+:104A20000E000F5EA0C2000C1040003E8FBF0010B1
60751+:104A30003C0280008C4401003C0380008C6201F841
60752+:104A40000440FFFE240200020A001292000000004F
60753+:104A500034660080ACC50038346401008C82001CD0
60754+:104A600000A210231840000224A2FFFCAC82001C0C
60755+:104A7000314200015040000A3C0380088CC2003CD7
60756+:104A800000A2102304430014240400058C620004D7
60757+:104A900014A200033C0380080A00128424040005C9
60758+:104AA0008C62000414A2001F8FBF00103C0208009B
60759+:104AB0008C4200D8304200201040000A3C0280089E
60760+:104AC00034620080906300089042004C144300055B
60761+:104AD0003C028008240400048FBF00100A00110962
60762+:104AE00027BD00183443008034420100A040000C70
60763+:104AF00024020001A46200143C0280008C440100E6
60764+:104B00003C0380008C6201F80440FFFE2402000296
60765+:104B1000AC6401C0A06201C43C021000AC6201F8A8
60766+:104B20008FBF001003E0000827BD001827BDFFE875
60767+:104B30003C0A8008AFBF0010354900808D22003C40
60768+:104B400000C04021308400FF004610231840009D23
60769+:104B500030E700FF354701002402000100A63023A2
60770+:104B6000A0E0000CA0E0000DA522001418C0002455
60771+:104B7000308200108D23001C8D22002C0068182329
60772+:104B80000043102B1040000F000000008CE20020BA
60773+:104B900000461021ACE200208CE200200043102BE4
60774+:104BA0001440000B3C023FFF8CE200200043102326
60775+:104BB000ACE200209522005C3042FFFF0A0012C1E7
60776+:104BC00000621821ACE00020006618213C023FFF83
60777+:104BD0003446FFFF00C3102B5440000100C01821D1
60778+:104BE0003C0280083442008000651821AC43001C60
60779+:104BF000A0400024A04000270A00130F3C038008B7
60780+:104C0000104000403C0380088D22003C00481023E7
60781+:104C10005840003D34670080912200242442000166
60782+:104C2000A1220024912200243C0308008C6300246C
60783+:104C3000304200FF0043102B1440009A8FBF001039
60784+:104C40008CE2001C00A21023184000960000000017
60785+:104C50008D4300049522005C006818233042FFFF5A
60786+:104C600000031843000210400043102A10400005C2
60787+:104C7000012020218D420004004810230A0012F276
60788+:104C8000000210439522005C3042FFFF00021040FA
60789+:104C90003C068008AC82002C34C5008094A2005CE5
60790+:104CA0008CA4002C94A3005C3042FFFF0002104053
60791+:104CB000008220213063FFFF0083182101031021AF
60792+:104CC000ACA2001C8CC2000434C60100ACC2001CA3
60793+:104CD000240200020E000F5EA0C2000C1040007102
60794+:104CE0008FBF00103C0280008C4401003C03800018
60795+:104CF0008C6201F80440FFFE240200020A0013390E
60796+:104D00000000000034670080ACE500383466010024
60797+:104D10008CC2001C00A210231840000224A2FFFC39
60798+:104D2000ACC2001C30820001504000083C038008E7
60799+:104D30008CE2003C00A2102304430051240400052F
60800+:104D40008C62000410A2003E3C0380088C620004C8
60801+:104D500054A200548FBF00103C0208008C4200D8BF
60802+:104D600030420020104000063C028008346200807F
60803+:104D7000906300089042004C104300403C028008C1
60804+:104D80003443008034420100A040000C24020001A2
60805+:104D9000A46200143C0280008C4401003C038000AB
60806+:104DA0008C6201F80440FFFE24020002AC6401C0E2
60807+:104DB000A06201C43C021000AC6201F80A00137743
60808+:104DC0008FBF001024020005A120002714E2000A72
60809+:104DD0003C038008354301009062000D2C42000620
60810+:104DE000504000053C0380089062000D2442000101
60811+:104DF000A062000D3C03800834670080ACE50038F9
60812+:104E0000346601008CC2001C00A21023184000026E
60813+:104E100024A2FFFCACC2001C308200015040000AFA
60814+:104E20003C0380088CE2003C00A2102304410014E3
60815+:104E3000240400058C62000414A200033C038008D3
60816+:104E40000A00136E240400058C62000414A20015ED
60817+:104E50008FBF00103C0208008C4200D83042002076
60818+:104E60001040000A3C028008346200809063000811
60819+:104E70009042004C144300053C02800824040004C6
60820+:104E80008FBF00100A00110927BD001834430080AD
60821+:104E900034420100A040000C24020001A46200146E
60822+:104EA0008FBF001003E0000827BD00183C0B8008EE
60823+:104EB00027BDFFE83C028000AFBF00103442010074
60824+:104EC000356A00809044000A356901008C45001461
60825+:104ED0008D4800389123000C308400FF0105102319
60826+:104EE0001C4000B3306700FF2CE20006504000B1C8
60827+:104EF0008FBF00102402000100E2300430C2000322
60828+:104F00005440000800A8302330C2000C144000A117
60829+:104F100030C20030144000A38FBF00100A00143BC1
60830+:104F20000000000018C00024308200108D43001CD7
60831+:104F30008D42002C006818230043102B1040000FF6
60832+:104F4000000000008D22002000461021AD2200202C
60833+:104F50008D2200200043102B1440000B3C023FFF29
60834+:104F60008D22002000431023AD2200209542005CDA
60835+:104F70003042FFFF0A0013AF00621821AD2000206D
60836+:104F8000006618213C023FFF3446FFFF00C3102B90
60837+:104F90005440000100C018213C02800834420080C7
60838+:104FA00000651821AC43001CA0400024A04000274D
60839+:104FB0000A0013FD3C038008104000403C038008B9
60840+:104FC0008D42003C004810231840003D34670080AB
60841+:104FD0009142002424420001A14200249142002475
60842+:104FE0003C0308008C630024304200FF0043102B78
60843+:104FF000144000708FBF00108D22001C00A21023EF
60844+:105000001840006C000000008D6300049542005CB5
60845+:10501000006818233042FFFF0003184300021040CD
60846+:105020000043102A10400005014020218D62000439
60847+:10503000004810230A0013E0000210439542005C70
60848+:105040003042FFFF000210403C068008AC82002C7A
60849+:1050500034C5008094A2005C8CA4002C94A3005C56
60850+:105060003042FFFF00021040008220213063FFFF2A
60851+:105070000083182101031021ACA2001C8CC2000483
60852+:1050800034C60100ACC2001C240200020E000F5EF8
60853+:10509000A0C2000C104000478FBF00103C028000EF
60854+:1050A0008C4401003C0380008C6201F80440FFFE48
60855+:1050B000240200020A00142D000000003467008062
60856+:1050C000ACE50038346601008CC2001C00A210233D
60857+:1050D0001840000224A2FFFCACC2001C3082000178
60858+:1050E0005040000A3C0380088CE2003C00A21023E0
60859+:1050F00004430014240400058C62000414A200037D
60860+:105100003C0380080A00141F240400058C6200047C
60861+:1051100014A200288FBF00103C0208008C4200D867
60862+:10512000304200201040000A3C02800834620080B7
60863+:10513000906300089042004C144300053C02800834
60864+:10514000240400048FBF00100A00110927BD0018B5
60865+:105150003443008034420100A040000C24020001CE
60866+:10516000A46200143C0280008C4401003C038000D7
60867+:105170008C6201F80440FFFE24020002AC6401C00E
60868+:10518000A06201C43C021000AC6201F80A00143BAA
60869+:105190008FBF00108FBF0010010030210A00115A8C
60870+:1051A00027BD0018010030210A00129927BD001800
60871+:1051B0008FBF001003E0000827BD00183C038008E3
60872+:1051C0003464010024020003A082000C8C620004FD
60873+:1051D00003E00008AC82001C3C05800834A300807A
60874+:1051E0009062002734A501002406004324420001F8
60875+:1051F000A0620027906300273C0208008C42004810
60876+:10520000306300FF146200043C07602194A500EAAB
60877+:105210000A00090130A5FFFF03E0000800000000BC
60878+:1052200027BDFFE8AFBF00103C0280000E00144411
60879+:105230008C4401803C02800834430100A060000CD3
60880+:105240008C4200048FBF001027BD001803E0000847
60881+:10525000AC62001C27BDFFE03C028008AFBF001815
60882+:10526000AFB10014AFB000103445008034460100E7
60883+:105270003C0880008D09014090C3000C8CA4003CC8
60884+:105280008CA200381482003B306700FF9502007C3E
60885+:1052900090A30027146000093045FFFF2402000599
60886+:1052A00054E200083C04800890C2000D2442000132
60887+:1052B000A0C2000D0A00147F3C048008A0C0000DAD
60888+:1052C0003C048008348201009042000C2403000555
60889+:1052D000304200FF1443000A24A205DC348300801E
60890+:1052E000906200272C4200075040000524A20A00CB
60891+:1052F00090630027240200140062100400A2102111
60892+:105300003C108008361000803045FFFF012020212E
60893+:105310000E001444A60500149602005C8E030038AB
60894+:105320003C1180003042FFFF000210400062182153
60895+:10533000AE03001C0E0003328E24014092020025B1
60896+:1053400034420040A20200250E00033D8E2401409D
60897+:105350008E2401403C0380008C6201F80440FFFE73
60898+:1053600024020002AC6401C0A06201C43C0210002F
60899+:10537000AC6201F88FBF00188FB100148FB000101D
60900+:1053800003E0000827BD00203C0360103C02080039
60901+:1053900024420174AC62502C8C6250003C048000AA
60902+:1053A00034420080AC6250003C0208002442547C2D
60903+:1053B0003C010800AC2256003C020800244254384C
60904+:1053C0003C010800AC2256043C020002AC840008F8
60905+:1053D000AC82000C03E000082402000100A0302190
60906+:1053E0003C1C0800279C56083C0200023C050400B7
60907+:1053F00000852826008220260004102B2CA5000101
60908+:105400002C840001000210803C0308002463560035
60909+:105410000085202500431821108000030000102182
60910+:10542000AC6600002402000103E000080000000058
60911+:105430003C1C0800279C56083C0200023C05040066
60912+:1054400000852826008220260004102B2CA50001B0
60913+:105450002C840001000210803C03080024635600E5
60914+:105460000085202500431821108000050000102130
60915+:105470003C02080024425438AC62000024020001BF
60916+:1054800003E00008000000003C0200023C030400AE
60917+:1054900000821026008318262C4200012C63000194
60918+:1054A000004310251040000B000028213C1C080080
60919+:1054B000279C56083C0380008C62000824050001EC
60920+:1054C00000431025AC6200088C62000C00441025DB
60921+:1054D000AC62000C03E0000800A010213C1C080096
60922+:1054E000279C56083C0580008CA3000C0004202754
60923+:1054F000240200010064182403E00008ACA3000C9F
60924+:105500003C020002148200063C0560008CA208D018
60925+:105510002403FFFE0043102403E00008ACA208D0DF
60926+:105520003C02040014820005000000008CA208D098
60927+:105530002403FFFD00431024ACA208D003E00008C0
60928+:10554000000000003C02601A344200108C430080CE
60929+:1055500027BDFFF88C440084AFA3000093A3000094
60930+:10556000240200041462001AAFA4000493A20001F4
60931+:105570001040000797A300023062FFFC3C0380004C
60932+:10558000004310218C4200000A001536AFA200042F
60933+:105590003062FFFC3C03800000431021AC4400005B
60934+:1055A000A3A000003C0560008CA208D02403FFFEED
60935+:1055B0003C04601A00431024ACA208D08FA300045E
60936+:1055C0008FA2000034840010AC830084AC82008081
60937+:1055D00003E0000827BD000827BDFFE8AFBF0010AB
60938+:1055E0003C1C0800279C56083C0280008C43000CA1
60939+:1055F0008C420004004318243C0200021060001496
60940+:10560000006228243C0204003C04000210A00005B3
60941+:10561000006210243C0208008C4256000A00155B10
60942+:1056200000000000104000073C0404003C02080099
60943+:105630008C4256040040F809000000000A00156082
60944+:10564000000000000000000D3C1C0800279C5608CC
60945+:105650008FBF001003E0000827BD0018800802403B
60946+:1056600080080100800800808008000000000C8095
60947+:105670000000320008000E9808000EF408000F88A1
60948+:1056800008001028080010748008010080080080BD
60949+:10569000800800000A000028000000000000000050
60950+:1056A0000000000D6370362E322E316200000000C3
60951+:1056B00006020104000000000000000000000000DD
60952+:1056C000000000000000000038003C000000000066
60953+:1056D00000000000000000000000000000000020AA
60954+:1056E00000000000000000000000000000000000BA
60955+:1056F00000000000000000000000000000000000AA
60956+:10570000000000000000000021003800000000013F
60957+:105710000000002B000000000000000400030D400A
60958+:105720000000000000000000000000000000000079
60959+:105730000000000000000000100000030000000056
60960+:105740000000000D0000000D3C020800244259AC8E
60961+:105750003C03080024635BF4AC4000000043202BB2
60962+:105760001480FFFD244200043C1D080037BD9FFC4F
60963+:1057700003A0F0213C100800261000A03C1C0800EB
60964+:10578000279C59AC0E0002F6000000000000000D3E
60965+:1057900027BDFFB4AFA10000AFA20004AFA3000873
60966+:1057A000AFA4000CAFA50010AFA60014AFA700185F
60967+:1057B000AFA8001CAFA90020AFAA0024AFAB0028FF
60968+:1057C000AFAC002CAFAD0030AFAE0034AFAF00389F
60969+:1057D000AFB8003CAFB90040AFBC0044AFBF004819
60970+:1057E0000E000820000000008FBF00488FBC00445E
60971+:1057F0008FB900408FB8003C8FAF00388FAE0034B7
60972+:105800008FAD00308FAC002C8FAB00288FAA002406
60973+:105810008FA900208FA8001C8FA700188FA6001446
60974+:105820008FA500108FA4000C8FA300088FA2000486
60975+:105830008FA1000027BD004C3C1B60188F7A5030B0
60976+:10584000377B502803400008AF7A000000A01821E1
60977+:1058500000801021008028213C0460003C0760008B
60978+:105860002406000810600006348420788C42000072
60979+:10587000ACE220088C63000003E00008ACE3200CDD
60980+:105880000A000F8100000000240300403C02600079
60981+:1058900003E00008AC4320003C0760008F86000452
60982+:1058A0008CE520740086102100A2182B14600007DC
60983+:1058B000000028218F8AFDA024050001A1440013C7
60984+:1058C0008F89000401244021AF88000403E0000810
60985+:1058D00000A010218F84FDA08F8500049086001306
60986+:1058E00030C300FF00A31023AF82000403E00008D0
60987+:1058F000A08000138F84FDA027BDFFE8AFB000108B
60988+:10590000AFBF001490890011908700112402002875
60989+:10591000312800FF3906002830E300FF2485002CE1
60990+:105920002CD00001106200162484001C0E00006EB2
60991+:10593000000000008F8FFDA03C05600024020204DF
60992+:1059400095EE003E95ED003C000E5C0031ACFFFF93
60993+:10595000016C5025ACAA2010520000012402000462
60994+:10596000ACA22000000000000000000000000000C9
60995+:105970008FBF00148FB0001003E0000827BD00188F
60996+:105980000A0000A6000028218F85FDA027BDFFD8B2
60997+:10599000AFBF0020AFB3001CAFB20018AFB100140E
60998+:1059A000AFB000100080982190A4001124B0001C1A
60999+:1059B00024B1002C308300FF386200280E000090D4
61000+:1059C0002C5200010E00009800000000020020216F
61001+:1059D0001240000202202821000028210E00006E43
61002+:1059E000000000008F8DFDA03C0880003C05600099
61003+:1059F00095AC003E95AB003C02683025000C4C0095
61004+:105A0000316AFFFF012A3825ACA7201024020202C8
61005+:105A1000ACA6201452400001240200028FBF0020D7
61006+:105A20008FB3001C8FB200188FB100148FB000101C
61007+:105A300027BD002803E00008ACA2200027BDFFE03E
61008+:105A4000AFB20018AFB10014AFB00010AFBF001C70
61009+:105A50003C1160008E2320748F82000430D0FFFF41
61010+:105A600030F2FFFF1062000C2406008F0E00006E63
61011+:105A7000000000003C06801F0010440034C5FF00F9
61012+:105A80000112382524040002AE2720100000302126
61013+:105A9000AE252014AE2420008FBF001C8FB200184A
61014+:105AA0008FB100148FB0001000C0102103E0000877
61015+:105AB00027BD002027BDFFE0AFB0001030D0FFFFB2
61016+:105AC000AFBF0018AFB100140E00006E30F1FFFF41
61017+:105AD00000102400009180253C036000AC70201071
61018+:105AE0008FBF00188FB100148FB000102402000483
61019+:105AF000AC62200027BD002003E000080000102158
61020+:105B000027BDFFE03C046018AFBF0018AFB1001420
61021+:105B1000AFB000108C8850002403FF7F34028071E6
61022+:105B20000103382434E5380C241F00313C1980006F
61023+:105B3000AC8550003C11800AAC8253BCAF3F0008DA
61024+:105B40000E00054CAF9100400E00050A3C116000AC
61025+:105B50000E00007D000000008E3008083C0F570941
61026+:105B60002418FFF00218602435EEE00035EDF00057
61027+:105B7000018E5026018D58262D4600012D69000109
61028+:105B8000AF86004C0E000D09AF8900503C06601630
61029+:105B90008CC700003C0860148D0500A03C03FFFF8B
61030+:105BA00000E320243C02535300052FC2108200550D
61031+:105BB00034D07C00960201F2A780006C10400003F4
61032+:105BC000A780007C384B1E1EA78B006C960201F844
61033+:105BD000104000048F8D0050384C1E1EA78C007C96
61034+:105BE0008F8D005011A000058F83004C240E0020E3
61035+:105BF000A78E007CA78E006C8F83004C1060000580
61036+:105C00009785007C240F0020A78F007CA78F006C55
61037+:105C10009785007C2CB8008153000001240500808A
61038+:105C20009784006C2C91040152200001240404008C
61039+:105C30001060000B3C0260008FBF00188FB1001491
61040+:105C40008FB0001027BD0020A784006CA785007CC2
61041+:105C5000A380007EA780007403E00008A780009264
61042+:105C60008C4704382419103C30FFFFFF13F9000360
61043+:105C700030A8FFFF1100004624030050A380007EDF
61044+:105C80009386007E50C00024A785007CA780007CFE
61045+:105C90009798007CA780006CA7800074A780009272
61046+:105CA0003C010800AC3800800E00078700000000AF
61047+:105CB0003C0F60008DED0808240EFFF03C0B600ED9
61048+:105CC000260C0388356A00100000482100002821B6
61049+:105CD00001AE20243C105709AF8C0010AF8A004859
61050+:105CE000AF89001810900023AF8500148FBF0018F3
61051+:105CF0008FB100148FB0001027BD002003E0000812
61052+:105D0000AF80005400055080014648218D260004D4
61053+:105D10000A00014800D180219798007CA784006C7C
61054+:105D2000A7800074A78000923C010800AC38008076
61055+:105D30000E000787000000003C0F60008DED080892
61056+:105D4000240EFFF03C0B600E260C0388356A001011
61057+:105D5000000048210000282101AE20243C105709F2
61058+:105D6000AF8C0010AF8A0048AF8900181490FFDF95
61059+:105D7000AF85001424110001AF9100548FBF0018AB
61060+:105D80008FB100148FB0001003E0000827BD002081
61061+:105D90000A00017BA383007E3083FFFF8F880040D1
61062+:105DA0008F87003C000321403C0580003C020050EE
61063+:105DB000008248253C0660003C0A010034AC040027
61064+:105DC0008CCD08E001AA58241160000500000000F5
61065+:105DD0008CCF08E024E7000101EA7025ACCE08E092
61066+:105DE0008D19001001805821ACB900388D180014AD
61067+:105DF000ACB8003CACA9003000000000000000007E
61068+:105E00000000000000000000000000000000000092
61069+:105E100000000000000000003C0380008C640000D3
61070+:105E2000308200201040FFFD3C0F60008DED08E047
61071+:105E30003C0E010001AE18241460FFE100000000D8
61072+:105E4000AF87003C03E00008AF8B00588F8500400F
61073+:105E5000240BFFF03C06800094A7001A8CA90024B4
61074+:105E600030ECFFFF000C38C000EB5024012A402129
61075+:105E7000ACC8003C8CA400248CC3003C00831023DD
61076+:105E800018400033000000008CAD002025A2000166
61077+:105E90003C0F0050ACC2003835EE00103C068000CC
61078+:105EA000ACCE003000000000000000000000000048
61079+:105EB00000000000000000000000000000000000E2
61080+:105EC000000000003C0480008C9900003338002062
61081+:105ED0001300FFFD30E20008104000173C0980006D
61082+:105EE0008C880408ACA800108C83040CACA30014AC
61083+:105EF0003C1900203C188000AF19003094AE001807
61084+:105F000094AF001C01CF3021A4A6001894AD001A54
61085+:105F100025A70001A4A7001A94AB001A94AC001E98
61086+:105F2000118B00030000000003E0000800000000E7
61087+:105F300003E00008A4A0001A8D2A0400ACAA0010F7
61088+:105F40008D240404ACA400140A0002183C1900209B
61089+:105F50008CA200200A0002003C0F00500A0001EE53
61090+:105F60000000000027BDFFE8AFBF00100E000232A6
61091+:105F7000000000008F8900408FBF00103C038000AC
61092+:105F8000A520000A9528000A9527000427BD0018BF
61093+:105F90003105FFFF30E6000F0006150000A22025A6
61094+:105FA00003E00008AC6400803C0508008CA50020DC
61095+:105FB0008F83000C27BDFFE8AFB00010AFBF001407
61096+:105FC00010A300100000802124040001020430040A
61097+:105FD00000A6202400C3102450440006261000010F
61098+:105FE000001018802787FDA41480000A006718217C
61099+:105FF000261000012E0900025520FFF38F83000CAC
61100+:10600000AF85000C8FBF00148FB0001003E00008B4
61101+:1060100027BD00188C6800003C058000ACA8002457
61102+:106020000E000234261000013C0508008CA500205B
61103+:106030000A0002592E0900022405000100851804F7
61104+:106040003C0408008C84002027BDFFC8AFBF00348B
61105+:1060500000831024AFBE0030AFB7002CAFB60028CD
61106+:10606000AFB50024AFB40020AFB3001CAFB200182E
61107+:10607000AFB1001410400051AFB000108F84004049
61108+:10608000948700069488000A00E8302330D5FFFF8B
61109+:1060900012A0004B8FBF0034948B0018948C000A20
61110+:1060A000016C50233142FFFF02A2482B1520000251
61111+:1060B00002A02021004020212C8F000515E00002C5
61112+:1060C00000809821241300040E0001C102602021E9
61113+:1060D0008F87004002609021AF80004494F4000A52
61114+:1060E000026080211260004E3291FFFF3C1670006A
61115+:1060F0003C1440003C1E20003C1760008F99005863
61116+:106100008F380000031618241074004F0283F82BF8
61117+:1061100017E0003600000000107E00478F86004424
61118+:1061200014C0003A2403000102031023022320219B
61119+:106130003050FFFF1600FFF13091FFFF8F870040C6
61120+:106140003C1100203C108000AE11003094EB000A9E
61121+:106150003C178000024B5021A4EA000A94E9000A8F
61122+:1061600094E800043123FFFF3106000F00062D00E4
61123+:106170000065F025AEFE008094F3000A94F6001846
61124+:1061800012D30036001221408CFF00148CF4001052
61125+:1061900003E468210000C02101A4782B029870213B
61126+:1061A00001CF6021ACED0014ACEC001002B238233A
61127+:1061B00030F5FFFF16A0FFB88F8400408FBF00347A
61128+:1061C0008FBE00308FB7002C8FB600288FB500240B
61129+:1061D0008FB400208FB3001C8FB200188FB1001451
61130+:1061E0008FB0001003E0000827BD00381477FFCC03
61131+:1061F0008F8600440E000EE202002021004018218C
61132+:106200008F86004410C0FFC9020310230270702360
61133+:106210008F87004001C368210A0002E431B2FFFF0A
61134+:106220008F86004414C0FFC93C1100203C10800040
61135+:106230000A0002AEAE1100300E00046602002021FA
61136+:106240000A0002DB00401821020020210E0009395B
61137+:10625000022028210A0002DB004018210E0001EE76
61138+:10626000000000000A0002C702B2382327BDFFC8A1
61139+:10627000AFB7002CAFB60028AFB50024AFB40020F4
61140+:10628000AFB3001CAFB20018AFB10014AFB0001034
61141+:10629000AFBF00300E00011B241300013C047FFF40
61142+:1062A0003C0380083C0220003C010800AC20007048
61143+:1062B0003496FFFF34770080345200033C1512C03F
61144+:1062C000241400013C1080002411FF800E000245C0
61145+:1062D000000000008F8700488F8B00188F89001402
61146+:1062E0008CEA00EC8CE800E8014B302B01092823F4
61147+:1062F00000A6102314400006014B18231440000E82
61148+:106300003C05800002A3602B1180000B0000000000
61149+:106310003C0560008CEE00EC8CED00E88CA4180CC1
61150+:10632000AF8E001804800053AF8D00148F8F0010C3
61151+:10633000ADF400003C0580008CBF00003BF900017B
61152+:10634000333800011700FFE13C0380008C6201003C
61153+:1063500024060C0010460009000000008C680100B3
61154+:106360002D043080548000103C0480008C690100B2
61155+:106370002D2331811060000C3C0480008CAA0100A8
61156+:1063800011460004000020218CA6010024C5FF81D5
61157+:1063900030A400FF8E0B01000E000269AE0B00243A
61158+:1063A0000A00034F3C0480008C8D01002DAC3300AB
61159+:1063B00011800022000000003C0708008CE70098D4
61160+:1063C00024EE00013C010800AC2E00983C04800043
61161+:1063D0008C8201001440000300000000566000148D
61162+:1063E0003C0440008C9F01008C9801000000982123
61163+:1063F00003F1C82400193940330F007F00EF7025E6
61164+:1064000001D26825AC8D08308C8C01008C85010090
61165+:10641000258B0100017130240006514030A3007F1C
61166+:106420000143482501324025AC8808303C04400037
61167+:10643000AE0401380A00030E000000008C99010030
61168+:10644000240F0020AC99002092F80000330300FFD5
61169+:10645000106F000C241F0050547FFFDD3C048000AF
61170+:106460008C8401000E00154E000000000A00034F4E
61171+:106470003C04800000963824ACA7180C0A000327BF
61172+:106480008F8F00108C8501000E0008F72404008017
61173+:106490000A00034F3C04800000A4102B24030001D9
61174+:1064A00010400009000030210005284000A4102BF6
61175+:1064B00004A00003000318405440FFFC00052840DE
61176+:1064C0005060000A0004182B0085382B54E00004AB
61177+:1064D0000003184200C33025008520230003184222
61178+:1064E0001460FFF9000528420004182B03E000089F
61179+:1064F00000C310213084FFFF30C600FF3C0780003E
61180+:106500008CE201B80440FFFE00064C000124302557
61181+:106510003C08200000C820253C031000ACE00180AE
61182+:10652000ACE50184ACE4018803E00008ACE301B809
61183+:106530003C0660008CC5201C2402FFF03083020062
61184+:10654000308601001060000E00A2282434A500014E
61185+:106550003087300010E0000530830C0034A50004C3
61186+:106560003C04600003E00008AC85201C1060FFFDC7
61187+:106570003C04600034A5000803E00008AC85201C42
61188+:1065800054C0FFF334A500020A0003B03087300086
61189+:1065900027BDFFE8AFB00010AFBF00143C0760009C
61190+:1065A000240600021080001100A080218F83005873
61191+:1065B0000E0003A78C6400188F8200580000202171
61192+:1065C000240600018C45000C0E000398000000001A
61193+:1065D0001600000224020003000010218FBF0014E7
61194+:1065E0008FB0001003E0000827BD00188CE8201CC5
61195+:1065F0002409FFF001092824ACE5201C8F870058EE
61196+:106600000A0003CD8CE5000C3C02600E00804021A6
61197+:1066100034460100240900180000000000000000BA
61198+:10662000000000003C0A00503C0380003547020097
61199+:10663000AC68003834640400AC65003CAC670030E2
61200+:106640008C6C0000318B00201160FFFD2407FFFFE0
61201+:106650002403007F8C8D00002463FFFF248400044A
61202+:10666000ACCD00001467FFFB24C60004000000004E
61203+:10667000000000000000000024A402000085282B78
61204+:106680003C0300203C0E80002529FFFF010540212E
61205+:10669000ADC300301520FFE00080282103E0000892
61206+:1066A000000000008F82005827BDFFD8AFB3001C48
61207+:1066B000AFBF0020AFB20018AFB10014AFB00010F0
61208+:1066C00094460002008098218C5200182CC300814F
61209+:1066D0008C4800048C4700088C51000C8C49001039
61210+:1066E000106000078C4A00142CC4000414800013AE
61211+:1066F00030EB000730C5000310A0001000000000C0
61212+:106700002410008B02002021022028210E00039873
61213+:10671000240600031660000224020003000010217A
61214+:106720008FBF00208FB3001C8FB200188FB10014F0
61215+:106730008FB0001003E0000827BD00281560FFF1AE
61216+:106740002410008B3C0C80003C030020241F00011F
61217+:10675000AD830030AF9F0044000000000000000047
61218+:10676000000000002419FFF024D8000F031978243A
61219+:106770003C1000D0AD88003801F0702524CD000316
61220+:106780003C08600EAD87003C35850400AD8E0030BE
61221+:10679000000D38823504003C3C0380008C6B000007
61222+:1067A000316200201040FFFD0000000010E00008F2
61223+:1067B00024E3FFFF2407FFFF8CA800002463FFFFF2
61224+:1067C00024A50004AC8800001467FFFB24840004A7
61225+:1067D0003C05600EACA60038000000000000000080
61226+:1067E000000000008F8600543C0400203C0780001D
61227+:1067F000ACE4003054C000060120202102402021DA
61228+:106800000E0003A7000080210A00041D02002021C1
61229+:106810000E0003DD01402821024020210E0003A7C5
61230+:10682000000080210A00041D0200202127BDFFE096
61231+:10683000AFB200183092FFFFAFB10014AFBF001C21
61232+:10684000AFB000101640000D000088210A0004932C
61233+:106850000220102124050003508500278CE5000C40
61234+:106860000000000D262800013111FFFF24E2002066
61235+:106870000232802B12000019AF8200588F82004430
61236+:10688000144000168F8700583C0670003C0320001F
61237+:106890008CE5000000A62024148300108F84006083
61238+:1068A000000544023C09800000A980241480FFE90F
61239+:1068B000310600FF2CCA000B5140FFEB26280001D7
61240+:1068C000000668803C0E080025CE575801AE6021B6
61241+:1068D0008D8B0000016000080000000002201021E4
61242+:1068E0008FBF001C8FB200188FB100148FB0001042
61243+:1068F00003E0000827BD00200E0003982404008454
61244+:106900001600FFD88F8700580A000474AF8000601B
61245+:10691000020028210E0003BF240400018F870058C5
61246+:106920000A000474AF820060020028210E0003BF39
61247+:10693000000020210A0004A38F8700580E000404E1
61248+:10694000020020218F8700580A000474AF82006083
61249+:1069500030AFFFFF000F19C03C0480008C9001B8DD
61250+:106960000600FFFE3C1920043C181000AC83018097
61251+:10697000AC800184AC990188AC9801B80A00047518
61252+:106980002628000190E2000390E30002000020218D
61253+:106990000002FE0000033A0000FF2825240600083C
61254+:1069A0000E000398000000001600FFDC2402000324
61255+:1069B0008F870058000010210A000474AF82006025
61256+:1069C00090E8000200002021240600090A0004C308
61257+:1069D00000082E0090E4000C240900FF308500FF21
61258+:1069E00010A900150000302190F9000290F8000372
61259+:1069F000308F00FF94EB000400196E000018740043
61260+:106A0000000F62000186202501AE5025014B28258C
61261+:106A10003084FF8B0A0004C32406000A90E30002BE
61262+:106A200090FF0004000020210003360000DF28252D
61263+:106A30000A0004C32406000B0A0004D52406008BB8
61264+:106A4000000449C23127003F000443423C02800059
61265+:106A500000082040240316802CE60020AC43002CC4
61266+:106A600024EAFFE02482000114C0000330A900FFE3
61267+:106A700000801021314700FF000260803C0D800043
61268+:106A8000240A0001018D20213C0B000E00EA28049D
61269+:106A9000008B302111200005000538278CCE000026
61270+:106AA00001C5382503E00008ACC700008CD8000001
61271+:106AB0000307782403E00008ACCF000027BDFFE007
61272+:106AC000AFB10014AFB00010AFBF00183C076000BA
61273+:106AD0008CE408083402F0003C1160003083F000C0
61274+:106AE000240501C03C04800E000030211062000625
61275+:106AF000241000018CEA08083149F0003928E00030
61276+:106B00000008382B000780403C0D0200AE2D081411
61277+:106B1000240C16803C0B80008E2744000E000F8B47
61278+:106B2000AD6C002C120000043C02169124050001FB
61279+:106B3000120500103C023D2C345800E0AE384408E9
61280+:106B40003C1108008E31007C8FBF00183C066000AD
61281+:106B500000118540360F16808FB100148FB00010E1
61282+:106B60003C0E020027BD0020ACCF442003E000080B
61283+:106B7000ACCE08103C0218DA345800E0AE384408B5
61284+:106B80003C1108008E31007C8FBF00183C0660006D
61285+:106B900000118540360F16808FB100148FB00010A1
61286+:106BA0003C0E020027BD0020ACCF442003E00008CB
61287+:106BB000ACCE08100A0004EB240500010A0004EB27
61288+:106BC0000000282124020400A7820024A780001CC2
61289+:106BD000000020213C06080024C65A582405FFFF67
61290+:106BE00024890001000440803124FFFF01061821A0
61291+:106BF0002C87002014E0FFFAAC6500002404040098
61292+:106C0000A7840026A780001E000020213C06080063
61293+:106C100024C65AD82405FFFF248D0001000460809B
61294+:106C200031A4FFFF018658212C8A00201540FFFA6D
61295+:106C3000AD650000A7800028A7800020A780002263
61296+:106C4000000020213C06080024C65B582405FFFFF5
61297+:106C5000249900010004C0803324FFFF030678213B
61298+:106C60002C8E000415C0FFFAADE500003C05600065
61299+:106C70008CA73D002403E08F00E31024344601403C
61300+:106C800003E00008ACA63D002487007F000731C266
61301+:106C900024C5FFFF000518C2246400013082FFFFF5
61302+:106CA000000238C0A78400303C010800AC27003047
61303+:106CB000AF80002C0000282100002021000030219E
61304+:106CC0002489000100A728213124FFFF2CA81701E7
61305+:106CD000110000032C8300801460FFF924C600011A
61306+:106CE00000C02821AF86002C10C0001DA786002AF6
61307+:106CF00024CAFFFF000A11423C08080025085B581F
61308+:106D00001040000A00002021004030212407FFFF2E
61309+:106D1000248E00010004688031C4FFFF01A86021B7
61310+:106D20000086582B1560FFFAAD87000030A2001FC7
61311+:106D30005040000800043080240300010043C804D0
61312+:106D400000041080004878212738FFFF03E0000886
61313+:106D5000ADF8000000C820212405FFFFAC8500002D
61314+:106D600003E000080000000030A5FFFF30C6FFFF71
61315+:106D700030A8001F0080602130E700FF0005294295
61316+:106D80000000502110C0001D24090001240B000147
61317+:106D900025180001010B2004330800FF0126782686
61318+:106DA000390E00202DED00012DC2000101A2182591
61319+:106DB0001060000D014450250005C880032C4021BF
61320+:106DC0000100182110E0000F000A20278D040000A8
61321+:106DD000008A1825AD03000024AD00010000402109
61322+:106DE0000000502131A5FFFF252E000131C9FFFF12
61323+:106DF00000C9102B1040FFE72518000103E0000830
61324+:106E0000000000008D0A0000014440240A0005D162
61325+:106E1000AC68000027BDFFE830A5FFFF30C6FFFFCC
61326+:106E2000AFB00010AFBF001430E7FFFF00005021EB
61327+:106E30003410FFFF0000602124AF001F00C0482174
61328+:106E4000241800012419002005E0001601E010219B
61329+:106E50000002F943019F682A0009702B01AE40240B
61330+:106E600011000017000C18800064102110E00005CC
61331+:106E70008C4B000000F840040008382301675824B8
61332+:106E800000003821154000410000402155600016E7
61333+:106E90003169FFFF258B0001316CFFFF05E1FFEC3D
61334+:106EA00001E0102124A2003E0002F943019F682A5C
61335+:106EB0000009702B01AE40241500FFEB000C188078
61336+:106EC000154600053402FFFF020028210E0005B51B
61337+:106ED00000003821020010218FBF00148FB0001075
61338+:106EE00003E0000827BD00181520000301601821E9
61339+:106EF000000B1C0224080010306A00FF154000053A
61340+:106F0000306E000F250D000800031A0231A800FFA3
61341+:106F1000306E000F15C00005307F000325100004FF
61342+:106F200000031902320800FF307F000317E000055C
61343+:106F3000386900012502000200031882304800FF72
61344+:106F4000386900013123000110600004310300FFA3
61345+:106F5000250A0001314800FF310300FF000C6940A1
61346+:106F600001A34021240A000110CAFFD53110FFFF00
61347+:106F7000246E000131C800FF1119FFC638C9000195
61348+:106F80002D1F002053E0001C258B0001240D000163
61349+:106F90000A000648240E002051460017258B0001E8
61350+:106FA00025090001312800FF2D0900205120001281
61351+:106FB000258B000125430001010D5004014B1024D5
61352+:106FC000250900011440FFF4306AFFFF3127FFFF5D
61353+:106FD00010EE000C2582FFFF304CFFFF0000502117
61354+:106FE0003410FFFF312800FF2D0900205520FFF24B
61355+:106FF00025430001258B0001014648260A000602B0
61356+:10700000316CFFFF00003821000050210A000654B7
61357+:107010003410FFFF27BDFFD8AFB0001030F0FFFFE6
61358+:10702000AFB10014001039423211FFE000071080A8
61359+:10703000AFB3001C00B1282330D3FFFFAFB200185C
61360+:1070400030A5FFFF00809021026030210044202104
61361+:10705000AFBF00200E0005E03207001F022288218A
61362+:107060003403FFFF0240202102002821026030216A
61363+:1070700000003821104300093231FFFF02201021A7
61364+:107080008FBF00208FB3001C8FB200188FB1001487
61365+:107090008FB0001003E0000827BD00280E0005E0B7
61366+:1070A0000000000000408821022010218FBF002036
61367+:1070B0008FB3001C8FB200188FB100148FB0001076
61368+:1070C00003E0000827BD0028000424003C03600002
61369+:1070D000AC603D0810A00002348210063482101605
61370+:1070E00003E00008AC623D0427BDFFE0AFB0001034
61371+:1070F000309000FF2E020006AFBF001810400008BD
61372+:10710000AFB10014001030803C03080024635784A2
61373+:1071100000C328218CA400000080000800000000AB
61374+:10712000000020218FBF00188FB100148FB0001015
61375+:107130000080102103E0000827BD00209791002A5D
61376+:1071400016200051000020213C020800904200332C
61377+:107150000A0006BB00000000978D002615A0003134
61378+:10716000000020210A0006BB2402000897870024A3
61379+:1071700014E0001A00001821006020212402000100
61380+:107180001080FFE98FBF0018000429C2004530219C
61381+:1071900000A6582B1160FFE43C0880003C0720004B
61382+:1071A000000569C001A76025AD0C00203C038008E4
61383+:1071B0002402001F2442FFFFAC6000000441FFFDD9
61384+:1071C0002463000424A5000100A6702B15C0FFF560
61385+:1071D000000569C00A0006A58FBF00189787001C2C
61386+:1071E0003C04080024845A58240504000E0006605C
61387+:1071F00024060001978B002424440001308AFFFFFD
61388+:107200002569FFFF2D48040000402821150000409B
61389+:10721000A789002424AC3800000C19C00A0006B964
61390+:10722000A780001C9787001E3C04080024845AD8BD
61391+:10723000240504000E00066024060001979900262C
61392+:10724000244400013098FFFF272FFFFF2F0E04007A
61393+:107250000040882115C0002CA78F0026A780001EA3
61394+:107260003A020003262401003084FFFF0E00068D41
61395+:107270002C4500010011F8C027F00100001021C0CA
61396+:107280000A0006BB240200089785002E978700227B
61397+:107290003C04080024845B580E00066024060001AC
61398+:1072A0009787002A8F89002C2445000130A8FFFF12
61399+:1072B00024E3FFFF0109302B0040802114C0001897
61400+:1072C000A783002AA7800022978500300E000F7543
61401+:1072D00002002021244A05003144FFFF0E00068DE4
61402+:1072E000240500013C05080094A500320E000F752E
61403+:1072F00002002021244521003C0208009042003376
61404+:107300000A0006BB000521C00A0006F3A784001E80
61405+:1073100024AC3800000C19C00A0006B9A784001C70
61406+:107320000A00070DA7850022308400FF27BDFFE873
61407+:107330002C820006AFBF0014AFB000101040001543
61408+:1073400000A03821000440803C0308002463579CBF
61409+:10735000010328218CA40000008000080000000028
61410+:1073600024CC007F000751C2000C59C23170FFFFCE
61411+:107370002547C40030E5FFFF2784001C02003021B0
61412+:107380000E0005B52407000197860028020620217B
61413+:10739000A78400288FBF00148FB0001003E00008FE
61414+:1073A00027BD00183C0508008CA50030000779C2F5
61415+:1073B0000E00038125E4DF003045FFFF3C04080098
61416+:1073C00024845B58240600010E0005B52407000143
61417+:1073D000978E002A8FBF00148FB0001025CD0001BA
61418+:1073E00027BD001803E00008A78D002A0007C9C2C6
61419+:1073F0002738FF00001878C231F0FFFF3C04080076
61420+:1074000024845AD802002821240600010E0005B564
61421+:1074100024070001978D0026260E0100000E84002F
61422+:1074200025AC00013C0B6000A78C0026AD603D0838
61423+:1074300036040006000030213C0760008CE23D0469
61424+:10744000305F000617E0FFFD24C9000100061B00A5
61425+:10745000312600FF006440252CC50004ACE83D0443
61426+:1074600014A0FFF68FBF00148FB0001003E00008D7
61427+:1074700027BD0018000751C22549C8002406000195
61428+:10748000240700013C04080024845A580E0005B566
61429+:107490003125FFFF978700248FBF00148FB00010A5
61430+:1074A00024E6000127BD001803E00008A786002499
61431+:1074B0003C0660183C090800252900FCACC9502C8A
61432+:1074C0008CC850003C0580003C020002350700805B
61433+:1074D000ACC750003C04080024841FE03C030800B3
61434+:1074E00024631F98ACA50008ACA2000C3C01080066
61435+:1074F000AC2459A43C010800AC2359A803E00008BF
61436+:107500002402000100A030213C1C0800279C59AC3B
61437+:107510003C0C04003C0B0002008B3826008C4026FB
61438+:107520002CE200010007502B2D050001000A4880C5
61439+:107530003C030800246359A4004520250123182199
61440+:107540001080000300001021AC660000240200013E
61441+:1075500003E00008000000003C1C0800279C59AC18
61442+:107560003C0B04003C0A0002008A3026008B3826BF
61443+:107570002CC200010006482B2CE5000100094080C8
61444+:107580003C030800246359A4004520250103182169
61445+:1075900010800005000010213C0C0800258C1F986D
61446+:1075A000AC6C00002402000103E0000800000000B1
61447+:1075B0003C0900023C080400008830260089382677
61448+:1075C0002CC30001008028212CE400010083102539
61449+:1075D0001040000B000030213C1C0800279C59ACD7
61450+:1075E0003C0A80008D4E00082406000101CA68256F
61451+:1075F000AD4D00088D4C000C01855825AD4B000C9D
61452+:1076000003E0000800C010213C1C0800279C59AC76
61453+:107610003C0580008CA6000C0004202724020001F9
61454+:1076200000C4182403E00008ACA3000C3C020002D4
61455+:107630001082000B3C0560003C070400108700032B
61456+:107640000000000003E00008000000008CA908D042
61457+:10765000240AFFFD012A402403E00008ACA808D05A
61458+:107660008CA408D02406FFFE0086182403E000083E
61459+:10767000ACA308D03C05601A34A600108CC300806F
61460+:1076800027BDFFF88CC50084AFA3000093A40000C1
61461+:107690002402001010820003AFA5000403E00008DC
61462+:1076A00027BD000893A7000114E0001497AC000266
61463+:1076B00097B800023C0F8000330EFFFC01CF682119
61464+:1076C000ADA50000A3A000003C0660008CC708D058
61465+:1076D0002408FFFE3C04601A00E82824ACC508D04A
61466+:1076E0008FA300048FA200003499001027BD00086A
61467+:1076F000AF22008003E00008AF2300843C0B800031
61468+:10770000318AFFFC014B48218D2800000A00080C3B
61469+:10771000AFA8000427BDFFE8AFBF00103C1C080065
61470+:10772000279C59AC3C0580008CA4000C8CA2000462
61471+:107730003C0300020044282410A0000A00A31824DF
61472+:107740003C0604003C0400021460000900A610245A
61473+:107750001440000F3C0404000000000D3C1C080015
61474+:10776000279C59AC8FBF001003E0000827BD00180C
61475+:107770003C0208008C4259A40040F80900000000B7
61476+:107780003C1C0800279C59AC0A0008358FBF00102C
61477+:107790003C0208008C4259A80040F8090000000093
61478+:1077A0000A00083B000000003C0880008D0201B880
61479+:1077B0000440FFFE35090180AD2400003C031000A9
61480+:1077C00024040040AD250004A1240008A1260009DE
61481+:1077D000A527000A03E00008AD0301B83084FFFFCD
61482+:1077E0000080382130A5FFFF000020210A00084555
61483+:1077F000240600803087FFFF8CA400002406003898
61484+:107800000A000845000028218F8300788F860070C9
61485+:107810001066000B008040213C07080024E75B68ED
61486+:10782000000328C000A710218C440000246300013D
61487+:10783000108800053063000F5466FFFA000328C06B
61488+:1078400003E00008000010213C07080024E75B6CFF
61489+:1078500000A7302103E000088CC200003C03900028
61490+:1078600034620001008220253C038000AC640020CB
61491+:107870008C65002004A0FFFE0000000003E000086B
61492+:10788000000000003C0280003443000100832025FA
61493+:1078900003E00008AC44002027BDFFE0AFB10014B6
61494+:1078A0003091FFFFAFB00010AFBF001812200013DF
61495+:1078B00000A080218CA20000240400022406020003
61496+:1078C0001040000F004028210E0007250000000096
61497+:1078D00000001021AE000000022038218FBF0018E8
61498+:1078E0008FB100148FB0001000402021000028212B
61499+:1078F000000030210A00084527BD00208CA20000AE
61500+:10790000022038218FBF00188FB100148FB00010F3
61501+:107910000040202100002821000030210A000845F5
61502+:1079200027BD002000A010213087FFFF8CA5000498
61503+:107930008C4400000A000845240600068F83FD9C45
61504+:1079400027BDFFE8AFBF0014AFB00010906700087C
61505+:10795000008010210080282130E600400000202116
61506+:1079600010C000088C5000000E0000BD0200202155
61507+:10797000020020218FBF00148FB000100A000548BC
61508+:1079800027BD00180E0008A4000000000E0000BD76
61509+:1079900002002021020020218FBF00148FB00010B0
61510+:1079A0000A00054827BD001827BDFFE0AFB0001052
61511+:1079B0008F90FD9CAFBF001CAFB20018AFB1001498
61512+:1079C00092060001008088210E00087230D2000467
61513+:1079D00092040005001129C2A6050000348300406E
61514+:1079E000A20300050E00087C022020210E00054A9B
61515+:1079F0000220202124020001AE02000C02202821D6
61516+:107A0000A602001024040002A602001224060200AE
61517+:107A1000A60200140E000725A60200161640000F4D
61518+:107A20008FBF001C978C00743C0B08008D6B007896
61519+:107A30002588FFFF3109FFFF256A0001012A382B45
61520+:107A400010E00006A78800743C0F6006240E0016A4
61521+:107A500035ED0010ADAE00508FBF001C8FB2001886
61522+:107A60008FB100148FB0001003E0000827BD002084
61523+:107A700027BDFFE0AFB10014AFBF0018AFB00010DA
61524+:107A80001080000400A088212402008010820007DA
61525+:107A9000000000000000000D8FBF00188FB100141F
61526+:107AA0008FB0001003E0000827BD00200E00087210
61527+:107AB00000A020218F86FD9C0220202190C500057A
61528+:107AC0000E00087C30B000FF2403003E1603FFF1D7
61529+:107AD0003C0680008CC401780480FFFE34C801405D
61530+:107AE000240900073C071000AD11000002202021EE
61531+:107AF000A10900048FBF00188FB100148FB00010CF
61532+:107B0000ACC701780A0008C527BD002027BDFFE0EB
61533+:107B1000AFB00010AFBF0018AFB100143C10800030
61534+:107B20008E110020000000000E00054AAE04002067
61535+:107B3000AE1100208FBF00188FB100148FB000105D
61536+:107B400003E0000827BD00203084FFFF00803821BB
61537+:107B50002406003500A020210A0008450000282145
61538+:107B60003084FFFF008038212406003600A0202149
61539+:107B70000A0008450000282127BDFFD0AFB500242A
61540+:107B80003095FFFFAFB60028AFB40020AFBF002C88
61541+:107B9000AFB3001CAFB20018AFB10014AFB000100B
61542+:107BA00030B6FFFF12A000270000A0218F920058DE
61543+:107BB0008E4300003C0680002402004000033E0289
61544+:107BC00000032C0230E4007F006698241482001D1C
61545+:107BD00030A500FF8F8300682C68000A1100001098
61546+:107BE0008F8D0044000358803C0C0800258C57B84A
61547+:107BF000016C50218D4900000120000800000000A8
61548+:107C000002D4302130C5FFFF0E0008522404008446
61549+:107C1000166000028F920058AF8000688F8D00447C
61550+:107C20002659002026980001032090213314FFFFDD
61551+:107C300015A00004AF9900580295202B1480FFDC9A
61552+:107C400000000000028010218FBF002C8FB600289A
61553+:107C50008FB500248FB400208FB3001C8FB20018A2
61554+:107C60008FB100148FB0001003E0000827BD003072
61555+:107C70002407003414A70149000000009247000EB9
61556+:107C80008F9FFDA08F90FD9C24181600A3E700197C
61557+:107C90009242000D3C0880003C07800CA3E20018D3
61558+:107CA000964A00123C0D60003C117FFFA60A005C62
61559+:107CB000964400103623FFFF240200053099FFFF91
61560+:107CC000AE1900548E46001CAD1800288CEF000041
61561+:107CD0008DAE444801E6482601C93021AE06003881
61562+:107CE0008E05003824CB00013C0E7F00AE05003C21
61563+:107CF0008E0C003CAFEC0004AE0B00208E13002075
61564+:107D0000AE13001CA3E0001BAE03002CA3E2001284
61565+:107D10008E4A001424130050AE0A00348E0400343E
61566+:107D2000AFE400148E590018AE1900489258000CA8
61567+:107D3000A218004E920D000835AF0020A20F0008D7
61568+:107D40008E090018012E282434AC4000AE0C001817
61569+:107D5000920B0000317200FF1253027F2403FF8058
61570+:107D60003C04080024845BE80E0008AA0000000020
61571+:107D70003C1108008E315BE80E00087202202021C1
61572+:107D80002405000424080001A2050025022020216A
61573+:107D90000E00087CA20800053C0580008CB001782C
61574+:107DA0000600FFFE8F92005834AE0140240F0002FF
61575+:107DB0003C091000ADD10000A1CF0004ACA90178AE
61576+:107DC0000A000962AF8000682CAD003751A0FF9413
61577+:107DD0008F8D0044000580803C110800263157E05B
61578+:107DE000021178218DEE000001C0000800000000A3
61579+:107DF0002411000414B1008C3C0780003C080800EA
61580+:107E00008D085BE88F86FD9CACE800208E4500085D
61581+:107E10008F99FDA0240D0050ACC500308E4C000899
61582+:107E2000ACCC00508E4B000CACCB00348E43001019
61583+:107E3000ACC300388E4A0010ACCA00548E42001405
61584+:107E4000ACC2003C8E5F0018AF3F00048E50001C97
61585+:107E5000ACD0002090C40000309800FF130D024AFF
61586+:107E6000000000008CC400348CD00030009030231F
61587+:107E700004C000F12404008C126000EE2402000310
61588+:107E80000A000962AF8200682419000514B900666F
61589+:107E90003C0580003C0808008D085BE88F86FD9C4F
61590+:107EA000ACA800208E4C00048F8AFDA0240720007F
61591+:107EB000ACCC001C924B000824120008A14B001906
61592+:107EC0008F82005890430009A14300188F85005805
61593+:107ED00090BF000A33E400FF1092001028890009C7
61594+:107EE000152000BA240E0002240D0020108D000B76
61595+:107EF000340780002898002117000008240740005C
61596+:107F000024100040109000053C0700012419008057
61597+:107F1000109900023C070002240740008CC20018A0
61598+:107F20003C03FF00004350240147F825ACDF001854
61599+:107F300090B2000BA0D200278F8300589464000CED
61600+:107F4000108001FE000000009467000C3C1F8000C0
61601+:107F50002405FFBFA4C7005C9063000E2407000443
61602+:107F6000A0C300088F820058904A000FA0CA0009E1
61603+:107F70008F8900588D3200108FE400740244C823AA
61604+:107F8000ACD900588D300014ACD0002C95380018B6
61605+:107F9000330DFFFFACCD00409531001A322FFFFFAB
61606+:107FA000ACCF00448D2E001CACCE00489128000EB2
61607+:107FB000A0C8000890CC000801855824126001B6C2
61608+:107FC000A0CB00088F9200580A000962AF870068B2
61609+:107FD0002406000614A600143C0E80003C0F080086
61610+:107FE0008DEF5BE88F85FD98ADCF00208E4900189E
61611+:107FF0008F86FD9C8F8BFDA0ACA900008CC800383B
61612+:1080000024040005ACA800048CCC003C1260008164
61613+:10801000AD6C00000A000962AF84006824110007FB
61614+:1080200010B1004B240400063C05080024A55BE8C1
61615+:108030000E000881240400818F9200580013102B39
61616+:108040000A000962AF820068241F002314BFFFF6F4
61617+:108050003C0C80003C0508008CA55BE88F8BFDA0E4
61618+:10806000AD8500208F91FD9C8E4600042564002084
61619+:1080700026450014AE260028240600030E000F81BA
61620+:10808000257000308F87005802002021240600034D
61621+:108090000E000F8124E500083C04080024845BE8FE
61622+:1080A0000E0008AA0000000092230000240A0050DD
61623+:1080B000306200FF544AFFE18F9200580E000F6CAF
61624+:1080C000000000000A000A6A8F920058240800335A
61625+:1080D00014A800323C0380003C1108008E315BE89C
61626+:1080E0008F8FFDA0AC7100208E420008240D002867
61627+:1080F0008F89FD9CADE200308E4A000C24060009F9
61628+:10810000ADEA00348E5F0010ADFF00388E440014DD
61629+:10811000ADE400208E590018ADF900248E58001CE3
61630+:10812000ADF80028A1ED00118E4E00041260003160
61631+:10813000AD2E00288F9200580A000962AF860068B1
61632+:10814000240D002214ADFFB8000000002404000735
61633+:108150003C1008008E105BE83C188000AF10002037
61634+:108160005660FEAEAF8400683C04080024845BE8DF
61635+:108170000E0008AA241300508F84FD9C90920000EA
61636+:10818000325900FF1333014B000000008F9200585A
61637+:10819000000020210A000962AF8400683C05080045
61638+:1081A00024A55BE80E000858240400810A000A6A2E
61639+:1081B0008F92005802D498213265FFFF0E000852BA
61640+:1081C000240400840A0009628F920058108EFF5325
61641+:1081D000240704002887000310E00179241100041B
61642+:1081E000240F0001548FFF4D240740000A000A228B
61643+:1081F000240701003C05080024A55BE80E0008A444
61644+:10820000240400828F920058000030210A00096285
61645+:10821000AF8600683C04080024845BE88CC2003808
61646+:108220000E0008AA8CC3003C8F9200580A000AC0B6
61647+:1082300000002021240400823C05080024A55BE8FE
61648+:108240000E0008A4000000008F92005800001021CA
61649+:108250000A000962AF8200688E5000048F91FD9C75
61650+:108260003C078000ACF00020922C00050200282181
61651+:10827000318B0002156001562404008A8F92FDA004
61652+:108280002404008D9245001B30A6002014C001502C
61653+:1082900002002821922E00092408001231C900FF93
61654+:1082A0001128014B240400810E00087202002021D5
61655+:1082B0009258001B240F000402002021370D0042B9
61656+:1082C000A24D001B0E00087CA22F00253C0580005B
61657+:1082D0008CA401780480FFFE34B90140241F000201
61658+:1082E000AF300000A33F00048F9200583C101000F4
61659+:1082F000ACB001780A000A6B0013102B8E500004FA
61660+:108300008F91FD9C3C038000AC700020922A0005F8
61661+:108310000200282131420002144000172404008A80
61662+:10832000922C00092412000402002821318B00FF46
61663+:1083300011720011240400810E0008720200202135
61664+:108340008F89FDA0240800122405FFFE912F001B39
61665+:108350000200202135EE0020A12E001BA2280009DA
61666+:108360009226000500C538240E00087CA2270005CF
61667+:1083700002002821000020210E0009330000000027
61668+:108380000A000A6A8F9200588E4C00043C07800055
61669+:108390003C10080026105BE8ACEC00203C01080013
61670+:1083A000AC2C5BE8924B0003317100041220013BBE
61671+:1083B0008F84FD9C24020006A0820009924F001BBE
61672+:1083C000240EFFC031E9003F012E4025A08800089F
61673+:1083D0009245000330A6000114C0013200000000E5
61674+:1083E0008E420008AE0200083C0208008C425BF09E
61675+:1083F000104001318F90FDA0000219C28F8DFD9CAD
61676+:10840000A603000C8E4A000C24180001240400145A
61677+:10841000AE0A002C8E420010AE02001C965F0016C1
61678+:10842000A61F003C96590014A619003EADB8000CDA
61679+:10843000A5B80010A5B80012A5B80014A5B800167C
61680+:1084400012600144A2040011925100033232000272
61681+:108450002E5300018F920058266200080A0009621C
61682+:10846000AF8200688E4400043C1980003C068008FE
61683+:10847000AF2400208E45000890D80000240D005045
61684+:10848000331100FF122D009C2407008824060009E8
61685+:108490000E000845000000000A000A6A8F9200588A
61686+:1084A0008E5000043C0980003C118008AD30002053
61687+:1084B0009228000024050050310400FF10850110AF
61688+:1084C0002407008802002021000028210E00084512
61689+:1084D0002406000E922D00002418FF80020028219F
61690+:1084E00001B8802524040004240600300E0007256E
61691+:1084F000A23000000A000A6A8F9200588E500004D1
61692+:108500008F91FDA03C028000AC500020923F001BE8
61693+:1085100033F900101320006C240700810200202191
61694+:10852000000028212406001F0E000845000000005E
61695+:108530000A000A6A8F9200588E44001C0E00085DE3
61696+:1085400000000000104000E3004048218F880058E0
61697+:1085500024070089012020218D05001C240600012C
61698+:108560000E000845000000000A000A6A8F920058B9
61699+:10857000964900023C10080026105BE831280004F0
61700+:10858000110000973C0460008E4E001C3C0F8000E0
61701+:10859000ADEE00203C010800AC2E5BE896470002DF
61702+:1085A00030E40001148000E6000000008E42000468
61703+:1085B000AE0200083C1008008E105BF0120000ECC8
61704+:1085C0003C0F80008F92FD9C241000018E4E0018FD
61705+:1085D0008F8DFDA08F9FFD9801CF4825AE490018D3
61706+:1085E000A2400005AE50000C3C0808008D085BF06E
61707+:1085F0008F840058A6500010000839C2A6500012FF
61708+:10860000A6500014A6500016A5A7000C8C8C0008DC
61709+:108610008F8B00588F8A0058ADAC002C8D63000CF6
61710+:1086200024070002ADA3001C91460010A1A6001172
61711+:108630008F82005890450011A3E500088F990058DB
61712+:1086400093380012A258004E8F910058922F0013B9
61713+:10865000A1AF00128F920058964E0014A5AE003CB8
61714+:1086600096490016A5A9003E8E480018ADA8001432
61715+:108670005660FD6AAF8700683C05080024A55BE8EA
61716+:108680000E000881000020218F9200580000382140
61717+:108690000A000962AF8700683C05080024A55BE872
61718+:1086A0000E0008A4240400828F9200580A000A4D8C
61719+:1086B000000038210E000F6C000000008F9200585F
61720+:1086C0000A000AC0000020210E00087202002021CA
61721+:1086D0009223001B02002021346A00100E00087C47
61722+:1086E000A22A001B000038210200202100002821BE
61723+:1086F0000A000BA52406001F9242000C305F000107
61724+:1087000013E0000300000000964A000EA4CA002CEB
61725+:10871000924B000C316300025060000600003821CB
61726+:108720008E470014964C0012ACC7001CA4CC001A53
61727+:10873000000038210A000B7F240600093C050800D0
61728+:1087400024A55BE80E0008A42404008B8F92005837
61729+:108750000A000A4D0013382B3C0C08008D8C5BE896
61730+:1087600024DFFFFE25930100326B007F016790211B
61731+:1087700002638824AD110028AE4600E0AE4000E45C
61732+:108780000A0009B3AE5F001CACC000543C0D0800E9
61733+:108790008DAD5BE83C18800C37090100ACED00287A
61734+:1087A0008E510014AD3100E08E4F0014AD2F00E467
61735+:1087B0008E4E001025C7FFFE0A0009F4AD27001CED
61736+:1087C0005491FDD6240740000A000A222407100015
61737+:1087D0000E00092D000000000A000A6A8F9200585E
61738+:1087E0008C83442C3C12DEAD3651BEEF3C010800B8
61739+:1087F000AC205BE810710062000000003C196C6264
61740+:1088000037387970147800082404000297850074C2
61741+:108810009782006C2404009200A2F82B13E0001948
61742+:1088200002002821240400020E00069524050200FF
61743+:108830003C068000ACC200203C010800AC225BE892
61744+:108840001040000D8F8C0058240A002824040003D7
61745+:10885000918B0010316300FF546A00012404000171
61746+:108860000E0000810000000010400004240400837A
61747+:108870000A000BC28F920058240400833C050800B4
61748+:1088800024A55BE80E000881000000008F920058CC
61749+:108890000013382B0A000962AF8700680A000B49F1
61750+:1088A000240200128E4400080E00085D0000000043
61751+:1088B0000A000B55AE0200083C05080024A55BE841
61752+:1088C0000E000858240400878F9200580A000B728B
61753+:1088D0000013102B240400040E000695240500301C
61754+:1088E0001440002A004048218F8800582407008344
61755+:1088F000012020218D05001C0A000BB32406000175
61756+:108900008F8300788F8600701066FEEE000038219D
61757+:108910003C07080024E75B6C000320C00087282187
61758+:108920008CAE000011D0005D246F000131E3000F18
61759+:108930005466FFFA000320C00A000B8C00003821A7
61760+:108940008E4400040E00085D000000000A000BC801
61761+:10895000AE0200083C05080024A55BE80E0008A450
61762+:10896000240400828F9200580A000B72000010212C
61763+:108970003C05080024A55BE80A000C7C2404008761
61764+:108980008C83442C0A000C5B3C196C628F88005865
61765+:108990003C0780083C0C8000240B0050240A000196
61766+:1089A000AD820020A0EB0000A0EA000191030004CA
61767+:1089B000A0E3001891040005A0E400199106000648
61768+:1089C0003C04080024845B6CA0E6001A91020007B6
61769+:1089D0003C06080024C65B68A0E2001B9105000865
61770+:1089E000A0E5001C911F0009A0FF001D9119000ABD
61771+:1089F000A0F9001E9118000BA0F8001F9112000CA6
61772+:108A0000A0F200209111000DA0F100219110000EA4
61773+:108A1000A0F00022910F000FA0EF0023910E001094
61774+:108A2000A0EE0024910D0011A0ED0025950C00147E
61775+:108A3000A4EC0028950B00168F8A00708F920078A6
61776+:108A4000A4EB002A95030018000A10C02545000178
61777+:108A5000A4E3002C8D1F001C0044C0210046C82147
61778+:108A600030A5000FAF3F0000AF09000010B20006B4
61779+:108A7000AF850070000038218D05001C01202021E9
61780+:108A80000A000BB32406000124AD000131A7000F3A
61781+:108A9000AF8700780A000CF9000038213C06080076
61782+:108AA00024C65B680086902100003821ACA000003D
61783+:108AB0000A000B8CAE4000003C0482013C036000C5
61784+:108AC00034820E02AC603D68AF80009803E000087D
61785+:108AD000AC623D6C27BDFFE8AFB000103090FFFFE7
61786+:108AE000001018422C620041AFBF00141440000275
61787+:108AF00024040080240300403C010800AC300060E6
61788+:108B00003C010800AC2300640E000F7500602821B2
61789+:108B1000244802BF2409FF8001092824001039805D
61790+:108B2000001030408FBF00148FB0001000A720212C
61791+:108B300000861821AF8300803C010800AC25005856
61792+:108B40003C010800AC24005C03E0000827BD0018CD
61793+:108B5000308300FF30C6FFFF30E400FF3C08800098
61794+:108B60008D0201B80440FFFE000354000144382583
61795+:108B70003C09600000E920253C031000AD050180A0
61796+:108B8000AD060184AD04018803E00008AD0301B81F
61797+:108B90008F8500583C0A6012354800108CAC0004E8
61798+:108BA0003C0D600E35A60010318B00062D690001CA
61799+:108BB000AD0900C48CA70004ACC731808CA20008AA
61800+:108BC00094A40002ACC231848CA3001C0460000396
61801+:108BD000A784009003E00008000000008CAF00189C
61802+:108BE000ACCF31D08CAE001C03E00008ACCE31D449
61803+:108BF0008F8500588F87FF288F86FF308CAE00044A
61804+:108C00003C0F601235E80010ACEE00788CAD000827
61805+:108C1000ACED007C8CAC0010ACCC004C8CAB000CF0
61806+:108C2000ACCB004894CA00543C0208008C4200447B
61807+:108C300025490001A4C9005494C400543083FFFFA7
61808+:108C400010620017000000003C0208008C42004047
61809+:108C5000A4C200528CA30018ACE300308CA2001414
61810+:108C6000ACE2002C8CB90018ACF900388CB80014B8
61811+:108C700024050001ACF800348D0600BC50C5001975
61812+:108C80008D0200B48D0200B8A4E2004894E40048CC
61813+:108C9000A4E4004A94E800EA03E000083102FFFF80
61814+:108CA0003C0208008C420024A4C00054A4C200521C
61815+:108CB0008CA30018ACE300308CA20014ACE2002CB2
61816+:108CC0008CB90018ACF900388CB8001424050001E8
61817+:108CD000ACF800348D0600BC54C5FFEB8D0200B823
61818+:108CE0008D0200B4A4E2004894E40048A4E4004AE1
61819+:108CF00094E800EA03E000083102FFFF8F86005885
61820+:108D00003C0480008CC900088CC80008000929C0F8
61821+:108D1000000839C0AC87002090C30007306200040F
61822+:108D20001040003EAF85009490CB0007316A0008E8
61823+:108D30001140003D8F87FF2C8CCD000C8CCE001491
61824+:108D400001AE602B11800036000000008CC2000CC8
61825+:108D5000ACE200708CCB00188F85FF288F88FF3025
61826+:108D6000ACEB00748CCA00102402FFF8ACAA00D847
61827+:108D70008CC9000CAD0900608CC4001CACA400D0F0
61828+:108D800090E3007C0062C824A0F9007C90D8000722
61829+:108D9000330F000811E000040000000090ED007C9B
61830+:108DA00035AC0001A0EC007C90CF000731EE000153
61831+:108DB00011C000060000000090E3007C241800347D
61832+:108DC00034790002A0F9007CACB800DC90C2000746
61833+:108DD0003046000210C000040000000090E8007C53
61834+:108DE00035040004A0E4007C90ED007D3C0B600E97
61835+:108DF000356A001031AC003FA0EC007D8D4931D4C4
61836+:108E00003127000110E00002240E0001A0AE00098D
61837+:108E100094AF00EA03E0000831E2FFFF8F87FF2CE8
61838+:108E20000A000DAF8CC200140A000DB0ACE0007057
61839+:108E30008F8C005827BDFFD8AFB3001CAFB200180D
61840+:108E4000AFB00010AFBF0020AFB10014918F00157C
61841+:108E50003C13600E3673001031EB000FA38B009CA7
61842+:108E60008D8F00048D8B0008959F0012959900103E
61843+:108E70009584001A9598001E958E001C33EDFFFF17
61844+:108E8000332AFFFF3089FFFF3308FFFF31C7FFFFA1
61845+:108E90003C010800AC2D00243C010800AC29004432
61846+:108EA0003C010800AC2A0040AE683178AE67317CE6
61847+:108EB00091850015959100163C12601236520010F3
61848+:108EC00030A200FF3230FFFFAE623188AE5000B4F6
61849+:108ED00091830014959F0018240600010066C804C1
61850+:108EE00033F8FFFFAE5900B8AE5800BC918E0014A5
61851+:108EF000AF8F00843C08600631CD00FFAE4D00C04E
61852+:108F0000918A00159584000E3C07600A314900FFE4
61853+:108F1000AF8B00883084FFFFAE4900C835110010C8
61854+:108F20000E000D1034F004103C0208008C4200606A
61855+:108F30003C0308008C6300643C0608008CC60058A3
61856+:108F40003C0508008CA5005C8F8400808FBF00204A
61857+:108F5000AE23004CAE65319CAE030054AE4500DC40
61858+:108F6000AE6231A0AE6331A4AE663198AE22004845
61859+:108F70008FB3001CAE0200508FB10014AE4200E06F
61860+:108F8000AE4300E4AE4600D88FB000108FB2001898
61861+:108F90000A00057D27BD0028978500929783007CF5
61862+:108FA00027BDFFE8AFB0001000A3102BAFBF001427
61863+:108FB000240400058F900058104000552409000239
61864+:108FC0000E0006958F850080AF8200942404000374
61865+:108FD0001040004F240900023C0680000E00008172
61866+:108FE000ACC2002024070001240820001040004DDE
61867+:108FF00024040005978E00928F8AFF2C24090050CC
61868+:1090000025C50001A7850092A14900003C0D08007C
61869+:109010008DAD0064240380008F84FF28000D66005E
61870+:10902000AD4C0018A5400006954B000A8F85FF3017
61871+:109030002402FF8001633024A546000A915F000AE4
61872+:109040000000482103E2C825A159000AA0A0000899
61873+:10905000A140004CA08000D5961800029783009094
61874+:109060003C020004A49800EA960F00022418FFBFF7
61875+:1090700025EE2401A48E00BE8E0D0004ACAD00448C
61876+:109080008E0C0008ACAC0040A4A00050A4A000547A
61877+:109090008E0B000C240C0030AC8B00288E060010C8
61878+:1090A000AC860024A480003EA487004EA487005014
61879+:1090B000A483003CAD420074AC8800D8ACA800602A
61880+:1090C000A08700FC909F00D433F9007FA09900D4C2
61881+:1090D000909000D402187824A08F00D4914E007C88
61882+:1090E00035CD0001A14D007C938B009CAD480070F4
61883+:1090F000AC8C00DCA08B00D68F8800888F87008422
61884+:10910000AC8800C4AC8700C8A5400078A540007AB0
61885+:109110008FBF00148FB000100120102103E0000861
61886+:1091200027BD00188F8500940E0007258F860080CC
61887+:109130000A000E9F2409000227BDFFE0AFB0001017
61888+:109140008F900058AFB10014AFBF00188E09000413
61889+:109150000E00054A000921C08E0800048F84FF28F4
61890+:109160008F82FF30000839C03C068000ACC7002069
61891+:10917000948500EA904300131460001C30B1FFFF97
61892+:109180008F8CFF2C918B0008316A00401540000B3A
61893+:10919000000000008E0D0004022030218FBF001857
61894+:1091A0008FB100148FB00010240400220000382179
61895+:1091B000000D29C00A000D2F27BD00200E000098C9
61896+:1091C000000000008E0D0004022030218FBF001827
61897+:1091D0008FB100148FB00010240400220000382149
61898+:1091E000000D29C00A000D2F27BD00200E000090A1
61899+:1091F000000000008E0D0004022030218FBF0018F7
61900+:109200008FB100148FB00010240400220000382118
61901+:10921000000D29C00A000D2F27BD002027BDFFE04B
61902+:10922000AFB200183092FFFFAFB00010AFBF001C0C
61903+:10923000AFB100141240001E000080218F8600583C
61904+:109240008CC500002403000600053F02000514023F
61905+:1092500030E4000714830016304500FF2CA80006F8
61906+:1092600011000040000558803C0C0800258C58BCBB
61907+:10927000016C50218D490000012000080000000011
61908+:109280008F8E0098240D000111CD005024020002A1
61909+:10929000AF820098260900013130FFFF24C800206A
61910+:1092A0000212202B010030211480FFE5AF88005806
61911+:1092B000020010218FBF001C8FB200188FB1001464
61912+:1092C0008FB0001003E0000827BD00209387007EC8
61913+:1092D00054E00034000030210E000DE700000000D3
61914+:1092E0008F8600580A000EFF240200018F87009825
61915+:1092F0002405000210E50031240400130000282199
61916+:1093000000003021240700010E000D2F0000000096
61917+:109310000A000F008F8600588F83009824020002F5
61918+:109320001462FFF6240400120E000D9A00000000E3
61919+:109330008F85009400403021240400120E000D2F70
61920+:10934000000038210A000F008F8600588F83009894
61921+:109350002411000310710029241F0002107FFFCE8A
61922+:1093600026090001240400100000282100003021FB
61923+:109370000A000F1D240700018F91009824060002A7
61924+:109380001626FFF9240400100E000E410000000014
61925+:10939000144000238F9800588F8600580A000EFF53
61926+:1093A00024020003240400140E000D2F00002821C5
61927+:1093B0008F8600580A000EFF240200020E000EA93C
61928+:1093C000000000000A000F008F8600580E000D3FBD
61929+:1093D00000000000241900022404001400002821C9
61930+:1093E0000000302100003821AF9900980E000D2FA9
61931+:1093F000000000000A000F008F8600580E000D5775
61932+:10940000000000008F8500942419000200403021E4
61933+:1094100024040010000038210A000F56AF9900986C
61934+:109420000040382124040010970F0002000028217A
61935+:109430000E000D2F31E6FFFF8F8600580A000F0047
61936+:10944000AF9100988F84FF2C3C077FFF34E6FFFF2D
61937+:109450008C8500182402000100A61824AC83001893
61938+:1094600003E00008A08200053084FFFF30A5FFFF65
61939+:109470001080000700001821308200011040000217
61940+:1094800000042042006518211480FFFB00052840DD
61941+:1094900003E000080060102110C000070000000079
61942+:1094A0008CA2000024C6FFFF24A50004AC820000AB
61943+:1094B00014C0FFFB2484000403E000080000000047
61944+:1094C00010A0000824A3FFFFAC86000000000000ED
61945+:1094D000000000002402FFFF2463FFFF1462FFFA74
61946+:1094E0002484000403E0000800000000000411C010
61947+:1094F00003E000082442024027BDFFE8AFB000109F
61948+:1095000000808021AFBF00140E000F9600A0202124
61949+:1095100000504821240AFF808FBF00148FB0001034
61950+:10952000012A30243127007F3C08800A3C042100B6
61951+:1095300000E8102100C428253C03800027BD001846
61952+:10954000AC650024AF820038AC400000AC6500245C
61953+:1095500003E00008AC4000403C0D08008DAD005811
61954+:1095600000056180240AFF8001A45821016C482174
61955+:10957000012A30243127007F3C08800C3C04210064
61956+:1095800000E8102100C428253C038000AC650028B9
61957+:10959000AF82003403E00008AC40002430A5FFFF98
61958+:1095A0003C0680008CC201B80440FFFE3C086015F8
61959+:1095B00000A838253C031000ACC40180ACC0018475
61960+:1095C000ACC7018803E00008ACC301B83C0D08003B
61961+:1095D0008DAD005800056180240AFF8001A4582148
61962+:1095E000016C4021010A4824000931403107007F05
61963+:1095F00000C728253C04200000A418253C02800058
61964+:10960000AC43083003E00008AF80003427BDFFE81A
61965+:10961000AFB0001000808021AFBF00140E000F9685
61966+:1096200000A0202100504821240BFF80012B502452
61967+:10963000000A39403128007F3C0620008FBF00140B
61968+:109640008FB0001000E8282534C2000100A21825C0
61969+:109650003C04800027BD0018AC83083003E00008FC
61970+:10966000AF8000383C0580088CA700603C0680086D
61971+:109670000087102B144000112C8340008CA8006040
61972+:109680002D0340001060000F240340008CC90060CF
61973+:109690000089282B14A00002008018218CC30060D0
61974+:1096A00000035A42000B30803C0A0800254A59202A
61975+:1096B00000CA202103E000088C8200001460FFF340
61976+:1096C0002403400000035A42000B30803C0A08008B
61977+:1096D000254A592000CA202103E000088C8200009E
61978+:1096E0003C05800890A60008938400AB24C20001CA
61979+:1096F000304200FF3043007F1064000C0002382726
61980+:10970000A0A200083C0480008C85017804A0FFFE24
61981+:109710008F8A00A0240900023C081000AC8A014096
61982+:10972000A089014403E00008AC8801780A00101BFE
61983+:1097300030E2008027BDFFD8AFB200188F9200A49E
61984+:10974000AFBF0020AFB3001CAFB00010AFB100142A
61985+:109750008F9300348E5900283C1000803C0EFFEFA0
61986+:10976000AE7900008E580024A260000A35CDFFFFBC
61987+:10977000AE7800049251002C3C0BFF9F356AFFFF2E
61988+:10978000A271000C8E6F000C3C080040A271000B0F
61989+:1097900001F06025018D4824012A382400E8302595
61990+:1097A000AE66000C8E450004AE6000183C0400FF5D
61991+:1097B000AE6500148E43002C3482FFFFA6600008C3
61992+:1097C0000062F824AE7F00108E5900088F9000A030
61993+:1097D000964E0012AE7900208E51000C31D83FFF1A
61994+:1097E00000187980AE7100248E4D001401F06021C4
61995+:1097F00031CB0001AE6D00288E4A0018000C41C22A
61996+:10980000000B4B80AE6A002C8E46001C01093821EB
61997+:10981000A667001CAE660030964500028E4400200C
61998+:10982000A665001EAE64003492430033306200042B
61999+:1098300054400006924700003C0280083443010077
62000+:109840008C7F00D0AE7F0030924700008F860038BA
62001+:10985000A0C700309245003330A4000250800007BA
62002+:10986000925100018F880038240BFF80910A00304C
62003+:10987000014B4825A1090030925100018F9000381A
62004+:10988000240CFFBF2404FFDFA21100318F8D0038AC
62005+:109890003C1880083711008091AF003C31EE007F0A
62006+:1098A000A1AE003C8F890038912B003C016C502404
62007+:1098B000A12A003C8F9F00388E68001493E6003C7C
62008+:1098C0002D0700010007114000C4282400A218251C
62009+:1098D000A3E3003C8F87003896590012A4F90032A8
62010+:1098E0008E450004922E007C30B0000300107823D7
62011+:1098F00031ED000300AD102131CC000215800002D3
62012+:1099000024460034244600303C0280083443008062
62013+:10991000907F007C00BFC824333800041700000289
62014+:1099200024C2000400C010218F98003824190002BE
62015+:10993000ACE20034A3190000924F003F8F8E003834
62016+:109940003C0C8008358B0080A1CF00018F9100383E
62017+:10995000924D003F8E440004A62D0002956A005CE3
62018+:109960000E000FF43150FFFF00024B800209382532
62019+:109970003C08420000E82825AE2500048E4400384B
62020+:109980008F850038ACA400188E460034ACA6001CAD
62021+:10999000ACA0000CACA00010A4A00014A4A0001661
62022+:1099A000A4A00020A4A00022ACA000248E62001479
62023+:1099B00050400001240200018FBF00208FB3001C23
62024+:1099C0008FB200188FB100148FB00010ACA2000845
62025+:1099D0000A00101327BD002827BDFFC83C058008DA
62026+:1099E00034A40080AFBF0034AFBE0030AFB7002C4E
62027+:1099F000AFB60028AFB50024AFB40020AFB3001C51
62028+:109A0000AFB20018AFB10014AFB00010948300786B
62029+:109A10009482007A104300512405FFFF0080F0215A
62030+:109A20000A0011230080B821108B004D8FBF003435
62031+:109A30008F8600A03C1808008F18005C2411FF805E
62032+:109A40003C1680000306782101F18024AED0002C62
62033+:109A500096EE007A31EC007F3C0D800E31CB7FFF1B
62034+:109A6000018D5021000B4840012AA82196A4000036
62035+:109A70003C0808008D0800582405FF8030953FFF02
62036+:109A800001061821001539800067C8210325F82434
62037+:109A90003C02010003E290253338007F3C11800C2A
62038+:109AA000AED20028031190219250000D320F000415
62039+:109AB00011E0003702E0982196E3007A96E8007AF8
62040+:109AC00096E5007A2404800031077FFF24E300013B
62041+:109AD00030627FFF00A4F82403E2C825A6F9007ACB
62042+:109AE00096E6007A3C1408008E94006030D67FFF22
62043+:109AF00012D400C1000000008E5800188F8400A00E
62044+:109B000002A028212713FFFF0E000FCEAE53002C1A
62045+:109B100097D5007897D4007A12950010000028217C
62046+:109B20003C098008352401003C0A8008914800085F
62047+:109B3000908700D53114007F30E400FF0284302B81
62048+:109B400014C0FFB9268B0001938E00AB268C000158
62049+:109B5000008E682115ACFFB78F8600A08FBF003440
62050+:109B60008FBE00308FB7002C8FB600288FB5002431
62051+:109B70008FB400208FB3001C8FB200188FB1001477
62052+:109B80008FB0001000A0102103E0000827BD0038AE
62053+:109B900000C020210E000F99028028218E4B00105A
62054+:109BA0008E4C00308F84003824090002016C502351
62055+:109BB000AE4A0010A089000096E3005C8E4400309D
62056+:109BC0008F9100380E000FF43070FFFF00024380C9
62057+:109BD000020838253C02420000E22825AE25000498
62058+:109BE0008E5F00048F8A00388E590000240B000815
62059+:109BF000AD5F001CAD590018AD40000CAD40001029
62060+:109C00009246000A240400052408C00030D000FF5A
62061+:109C1000A550001496580008A55800169251000A45
62062+:109C20003C188008322F00FFA54F0020964E0008F8
62063+:109C300037110100A54E0022AD400024924D000BCB
62064+:109C400031AC00FFA54C0002A14B00018E49003051
62065+:109C50008F830038240BFFBFAC690008A06400307C
62066+:109C60008F9000382403FFDF9607003200E8282495
62067+:109C700000B51025A6020032921F003233F9003FD2
62068+:109C800037260040A20600328F8C0038AD800034A9
62069+:109C90008E2F00D0AD8F0038918E003C3C0F7FFF9F
62070+:109CA00031CD007FA18D003C8F84003835EEFFFF61
62071+:109CB000908A003C014B4824A089003C8F850038E5
62072+:109CC00090A8003C01033824A0A7003C8E42003439
62073+:109CD0008F9100383C038008AE2200408E59002C42
62074+:109CE0008E5F0030033F3023AE26004492300048A0
62075+:109CF0003218007FA23800488F8800388E4D00301F
62076+:109D00008D0C004801AE582401965024014B482583
62077+:109D1000AD0900489244000AA104004C964700088F
62078+:109D20008F850038A4A7004E8E5000308E4400303E
62079+:109D30000E0003818C65006092F9007C0002F940FE
62080+:109D4000004028210002110003E2302133360002D6
62081+:109D500012C00003020680210005B0800216802197
62082+:109D6000926D007C31B30004126000020005708027
62083+:109D7000020E80218E4B00308F8800382405800031
62084+:109D8000316A0003000A4823312400030204182129
62085+:109D9000AD03003496E4007A96F0007A96F1007AEA
62086+:109DA00032027FFF2447000130FF7FFF0225C824D5
62087+:109DB000033F3025A6E6007A96F8007A3C120800A8
62088+:109DC0008E520060330F7FFF11F200180000000078
62089+:109DD0008F8400A00E000FCE02A028218F8400A047
62090+:109DE0000E000FDE028028210E001013000000007C
62091+:109DF0000A00111F0000000096F1007A022480245E
62092+:109E0000A6F0007A92EF007A92EB007A31EE00FF32
62093+:109E1000000E69C2000D6027000C51C03169007F3F
62094+:109E2000012A20250A001119A2E4007A96E6007A98
62095+:109E300000C5C024A6F8007A92EF007A92F3007A67
62096+:109E400031F200FF001271C2000E6827000DB1C090
62097+:109E5000326C007F01962825A2E5007A0A0011D015
62098+:109E60008F8400A03C0380003084FFFF30A5FFFFFB
62099+:109E7000AC640018AC65001C03E000088C620014A0
62100+:109E800027BDFFA03C068008AFBF005CAFBE0058F6
62101+:109E9000AFB70054AFB60050AFB5004CAFB40048F8
62102+:109EA000AFB30044AFB20040AFB1003CAFB0003838
62103+:109EB00034C80100910500D590C700083084FFFF29
62104+:109EC00030A500FF30E2007F0045182AAFA4001043
62105+:109ED000A7A00018A7A0002610600055AFA000148E
62106+:109EE00090CA00083149007F00A9302324D3FFFF26
62107+:109EF0000013802B8FB400100014902B02128824C2
62108+:109F0000522000888FB300143C03800894790052DB
62109+:109F1000947E00508FB60010033EC0230018BC0092
62110+:109F2000001714030016FC0002C2A82A16A00002A3
62111+:109F3000001F2C030040282100133C0000072403CD
62112+:109F400000A4102A5440000100A020212885000907
62113+:109F500014A000020080A021241400083C0C8008FA
62114+:109F60008D860048001459808D88004C3C03800089
62115+:109F70003169FFFF3C0A0010012A202534710400DA
62116+:109F8000AC660038AF9100A4AC68003CAC64003013
62117+:109F900000000000000000000000000000000000C1
62118+:109FA00000000000000000000000000000000000B1
62119+:109FB0008C6E000031CD002011A0FFFD0014782A26
62120+:109FC00001F01024104000390000A8213C16800840
62121+:109FD00092D700083C1280008E44010032F6007FC8
62122+:109FE0000E000F9902C028218E3900108E44010006
62123+:109FF0000000902133373FFF0E000FB102E028210F
62124+:10A00000923800003302003F2C500008520000102C
62125+:10A0100000008821000210803C030800246358E4FB
62126+:10A020000043F8218FFE000003C00008000000007C
62127+:10A0300090CF0008938C00AB31EE007F00AE682318
62128+:10A04000018D58210A0012172573FFFF0000882197
62129+:10A050003C1E80008FC401000E000FCE02E02821BC
62130+:10A060008FC401000E000FDE02C028211220000F55
62131+:10A070000013802B8F8B00A426A400010004AC00E9
62132+:10A08000027298230015AC032578004002B4B02A70
62133+:10A090000013802B241700010300882102D0102414
62134+:10A0A000AF9800A41440FFC9AFB700143C07800864
62135+:10A0B00094E200508FAE00103C05800002A288217F
62136+:10A0C0003C060020A4F10050ACA6003094F40050EF
62137+:10A0D00094EF005201D51823306CFFFF11F4001EDD
62138+:10A0E000AFAC00108CEF004C001561808CF500487F
62139+:10A0F00001EC28210000202100AC582B02A4C02133
62140+:10A10000030BB021ACE5004CACF600488FB4001056
62141+:10A110000014902B021288241620FF7C3C03800838
62142+:10A120008FB300148FBF005C8FBE00583A620001ED
62143+:10A130008FB700548FB600508FB5004C8FB40048D5
62144+:10A140008FB300448FB200408FB1003C8FB0003815
62145+:10A1500003E0000827BD006094FE00548CF2004428
62146+:10A1600033C9FFFE0009C8C00259F821ACBF003C4A
62147+:10A170008CE800448CAD003C010D50231940003B9D
62148+:10A18000000000008CF7004026E20001ACA200387D
62149+:10A190003C05005034A700103C038000AC67003041
62150+:10A1A00000000000000000000000000000000000AF
62151+:10A1B000000000000000000000000000000000009F
62152+:10A1C0008C7800003316002012C0FFFD3C1180087F
62153+:10A1D000962200543C1580003C068008304E000159
62154+:10A1E000000E18C0007578218DEC04003C070800B3
62155+:10A1F0008CE700443C040020ACCC00488DF40404FF
62156+:10A20000240B0001ACD4004C10EB0260AEA4003073
62157+:10A21000963900523C0508008CA5004000B99021F9
62158+:10A22000A6320052963F005427ED0001A62D00549F
62159+:10A230009626005430C4FFFF5487FF2F8FB40010C0
62160+:10A2400030A5FFFF0E0011F4A62000543C070800C3
62161+:10A250008CE70024963E00520047B82303D74823DA
62162+:10A26000A62900520A0012198FB400108CE2004097
62163+:10A270000A0012BE00000000922400012407000121
62164+:10A280003085007F14A7001C97AD00268E2B00148C
62165+:10A29000240CC000316A3FFF01AC48243C06080092
62166+:10A2A0008CC60060012A402531043FFF0086882BC0
62167+:10A2B00012200011A7A800263C0508008CA5005814
62168+:10A2C0008F9100A0000439802402FF8000B1182182
62169+:10A2D0000067F82103E2F02433F8007F3C1280008D
62170+:10A2E0003C19800EAE5E002C0319702191D0000D38
62171+:10A2F000360F0004A1CF000D0E001028241200011B
62172+:10A30000241100013C1E80008FC401000E000FCEFE
62173+:10A3100002E028218FC401000E000FDE02C02821B8
62174+:10A320001620FF558F8B00A40A0012860013802B85
62175+:10A330008F8600A490C80001310400201080019194
62176+:10A34000241000013C048008348B0080916A007C5A
62177+:10A350008F9E0034AFA0002C314900011120000F66
62178+:10A36000AFB000288CCD00148C8E006001AE602B45
62179+:10A370001580000201A038218C8700603C188008FD
62180+:10A38000370300808C70007000F0782B15E000021D
62181+:10A3900000E020218C640070AFA4002C3C028008F7
62182+:10A3A000344500808CD200148CBF0070025FC82B33
62183+:10A3B00017200002024020218CA400708FA7002CDF
62184+:10A3C0000087182310600003AFA3003024050002AB
62185+:10A3D000AFA500288FA400280264882B162000BA9D
62186+:10A3E000000018218CD000388FCE000C3C0F00806C
62187+:10A3F000AFD000008CCD00343C0CFF9F01CF58251E
62188+:10A40000AFCD000490CA003F3586FFFF01662024CF
62189+:10A410003C0900203C08FFEFA3CA000B0089382547
62190+:10A420003511FFFF00F118243C0500088F8700A4B8
62191+:10A430000065C825AFD9000C8CE20014AFC000182D
62192+:10A440008FA60030AFC200148CF800188FB0002C1B
62193+:10A450003C1FFFFBAFD8001C8CEF000837F2FFFF5A
62194+:10A4600003326824AFCF00248CEC000C020670216C
62195+:10A47000AFCD000CA7C00038A7C0003AAFCE002C6B
62196+:10A48000AFCC0020AFC000288CEA00148FAB002CAA
62197+:10A49000014B48230126402311000011AFC80010D2
62198+:10A4A00090EB003D8FC900048FC80000000B5100E5
62199+:10A4B000012A28210000102100AA882B010218215E
62200+:10A4C0000071F821AFC50004AFDF000090F2003D3D
62201+:10A4D000A3D2000A8F9900A497380006A7D80008D5
62202+:10A4E0008F910038240800023C038008A228000055
62203+:10A4F0003465008094BF005C8FA4002C33F0FFFF14
62204+:10A500000E000FF48F9200380002CB808F8500A4DC
62205+:10A51000021978253C18420001F87025AE4E00045F
62206+:10A520008F8400388CAD0038AC8D00188CAC0034B2
62207+:10A53000AC8C001CAC80000CAC800010A48000141B
62208+:10A54000A4800016A4800020A4800022AC800024F7
62209+:10A5500090A6003F8FA7002CA486000250E0019235
62210+:10A56000240700018FA200305040000290A2003D5D
62211+:10A5700090A2003E244A0001A08A00018F84003886
62212+:10A580008FA9002CAC8900083C128008364D008051
62213+:10A5900091AC007C3186000214C000022407003414
62214+:10A5A000240700308F8500A43C198008373F0080C5
62215+:10A5B00090B0000093F9007C240E0004A0900030BD
62216+:10A5C0008F8F00A48FB8002C8F8D003891F200017E
62217+:10A5D0003304000301C46023A1B200318F8E003820
62218+:10A5E0008F8600A42402C00095CA003294C90012CC
62219+:10A5F0008FAB002C0142402431233FFF010388250B
62220+:10A60000A5D1003291D000323185000300EBF82152
62221+:10A610003218003F370F0040A1CF00328FA4002C2A
62222+:10A6200003E5382133280004108000028F850038AC
62223+:10A6300000E838213C0A8008ACA700343549010005
62224+:10A640008D2800D08FA3002C2419FFBFACA80038A0
62225+:10A6500090B1003C2C640001240FFFDF3227007F03
62226+:10A66000A0A7003C8F98003800049140931F003C45
62227+:10A6700003F98024A310003C8F8C0038918E003C9D
62228+:10A6800001CF682401B23025A186003C8F8900A447
62229+:10A690008F8800388D2B0020AD0B00408D220024C8
62230+:10A6A000AD0200448D2A0028AD0A00488D23002CFD
62231+:10A6B0000E001013AD03004C8FB1002824070002D8
62232+:10A6C000122700118FA300280003282B00058023E8
62233+:10A6D0000270982400608021006090210A00126FAF
62234+:10A6E0000010882B962900128F8400A00000902172
62235+:10A6F0003125FFFFA7A900180E000FC22411000189
62236+:10A700000A00131D3C1E80003C0B80003C12800898
62237+:10A710008D640100924900088F92FF340E000F995A
62238+:10A720003125007F8F9900388FA700288FA4003033
62239+:10A73000A3270000965F005C33F0FFFF0E000FF4CC
62240+:10A740008F91003800026B80020D80253C0842008A
62241+:10A750008F8D00A402085025AE2A00048DA5003874
62242+:10A760008F8A003800007821000F1100AD450018D5
62243+:10A770008DB800343C047FFF3488FFFFAD58001CC7
62244+:10A7800091A6003E8D4C001C8D4900180006190052
62245+:10A79000000677020183C821004E58250323882B29
62246+:10A7A000012B382100F1F821AD59001CAD5F0018D4
62247+:10A7B000AD40000CAD40001091B0003E8FA40030C1
62248+:10A7C00024090005A550001495A500042419C00013
62249+:10A7D00000884024A545001691B8003EA5580020E9
62250+:10A7E00095AF0004A54F0022AD40002491AE003F7C
62251+:10A7F000A54E000291A6003E91AC003D01861023BB
62252+:10A80000244B0001A14B00018F9100388FA3003031
62253+:10A810003C028008344B0100AE230008A22900301E
62254+:10A820008F8C00388F8700A4959F003294F000121F
62255+:10A830002407FFBF033FC02432053FFF03057825EF
62256+:10A84000A58F0032918E00322418FFDF31CD003FFA
62257+:10A8500035A60040A18600328F910038240DFFFFFD
62258+:10A86000240CFF80AE2000348D6A00D0AE2A003860
62259+:10A870009223003C3069007FA229003C8F90003871
62260+:10A880003C0380009219003C0327F824A21F003CDF
62261+:10A890008F8E003891C5003C00B87824A1CF003CD1
62262+:10A8A0008F8A00383C0E8008AD4D00408FA6002CEA
62263+:10A8B000AD46004491420048004C5825A14B004849
62264+:10A8C0008F9000388F9900A48E09004801238824B6
62265+:10A8D00002283825AE070048933F003EA21F004CD7
62266+:10A8E0008F9800A48F8F003897050004A5E5004ECF
62267+:10A8F0000E0003818DC500609246007C8FAC003055
62268+:10A9000000026940000291000040282130CB000283
62269+:10A9100001B21021156000AA018230213C0E80088E
62270+:10A9200035C20080904C007C31830004106000032D
62271+:10A930008FB900300005788000CF3021241F00043B
62272+:10A940008F910038332D000303ED8023320800037C
62273+:10A9500000C85021AE2A00343C188000A7C500383A
62274+:10A960003C0680088F04010090DE00080E000FDE18
62275+:10A9700033C5007F0E001013000000000A00140D04
62276+:10A980008FA300288F9800348CC90038241F00033F
62277+:10A99000A7000008AF0900008CC50034A300000A1E
62278+:10A9A0008F9900A4AF0500043C080080932D003F60
62279+:10A9B000A31F000C8F0A000C3C02FF9FA30D000B8D
62280+:10A9C0000148F0253451FFFF3C12FFEF8F9900A49E
62281+:10A9D00003D170243646FFFF01C61824AF03000CD4
62282+:10A9E0008F2C0014972900128F8400A0AF0C001048
62283+:10A9F0008F2F0014AF000018AF000020AF0F00141D
62284+:10AA0000AF0000248F270018312F3FFF000F59801F
62285+:10AA1000AF0700288F2500080164F821312D0001BF
62286+:10AA2000AF0500308F31000C8F920038001F51C2EB
62287+:10AA3000000D438001481021241E00023C068008BE
62288+:10AA4000A702001CA7000034AF11002CA25E00007A
62289+:10AA500034D20080964E005C8F9900383C0342004F
62290+:10AA600031CCFFFF01833825AF2700048F8B00A472
62291+:10AA7000240500012402C0008D640038240700343E
62292+:10AA8000AF2400188D690034AF29001CAF20000CE2
62293+:10AA9000AF200010A7200014A7200016A720002038
62294+:10AAA000A7200022AF200024A7300002A325000128
62295+:10AAB0008F8800388F9F00A4AD10000893ED000030
62296+:10AAC000A10D00308F8A00A48F98003891510001A9
62297+:10AAD000A31100318F8B0038957E003203C27024A1
62298+:10AAE00001CF6025A56C0032916300323064003FD5
62299+:10AAF000A16400329249007C3125000214A00002BA
62300+:10AB00008F840038240700303C198008AC8700345B
62301+:10AB1000373201008E5F00D0240AFFBF020090216F
62302+:10AB2000AC9F0038908D003C31A8007FA088003C8D
62303+:10AB30008F9E003893C2003C004A8824A3D1003C79
62304+:10AB40008F8300380010882B9066003C34CE0020A4
62305+:10AB5000A06E003C8F8400A48F9800388C8C00205D
62306+:10AB6000AF0C00408C8F0024AF0F00448C8700286E
62307+:10AB7000AF0700488C8B002CAF0B004C0E0010135D
62308+:10AB80003C1E80000A0012700000000094C80052B1
62309+:10AB90003C0A08008D4A002401488821A4D10052B3
62310+:10ABA0000A0012198FB40010A08700018F840038AA
62311+:10ABB000240B0001AC8B00080A0013BE3C12800875
62312+:10ABC000000520800A0014A200C4302127BDFFE048
62313+:10ABD0003C0D8008AFB20018AFB00010AFBF001C32
62314+:10ABE000AFB1001435B200808E4C001835A80100BA
62315+:10ABF000964B000695A70050910900FC000C5602E8
62316+:10AC0000016728233143007F312600FF240200031F
62317+:10AC1000AF8300A8AF8400A010C2001B30B0FFFFBC
62318+:10AC2000910600FC2412000530C200FF10520033D0
62319+:10AC300000000000160000098FBF001C8FB2001832
62320+:10AC40008FB100148FB00010240D0C003C0C80005C
62321+:10AC500027BD002003E00008AD8D00240E0011FB8D
62322+:10AC6000020020218FBF001C8FB200188FB100148A
62323+:10AC70008FB00010240D0C003C0C800027BD00207C
62324+:10AC800003E00008AD8D0024965800789651007AB4
62325+:10AC9000924E007D0238782631E8FFFF31C400C0B3
62326+:10ACA000148000092D11000116000037000000007B
62327+:10ACB0005620FFE28FBF001C0E0010D100000000E4
62328+:10ACC0000A00156A8FBF001C1620FFDA0000000082
62329+:10ACD0000E0010D1000000001440FFD88FBF001CF0
62330+:10ACE0001600002200000000925F007D33E2003F6A
62331+:10ACF000A242007D0A00156A8FBF001C950900EA78
62332+:10AD00008F86008000802821240400050E0007257E
62333+:10AD10003130FFFF978300923C0480002465FFFFE1
62334+:10AD2000A78500928C8A01B80540FFFE0000000054
62335+:10AD3000AC8001808FBF001CAC9001848FB20018E2
62336+:10AD40008FB100148FB000103C0760133C0B100053
62337+:10AD5000240D0C003C0C800027BD0020AC8701882E
62338+:10AD6000AC8B01B803E00008AD8D00240E0011FB90
62339+:10AD7000020020215040FFB18FBF001C925F007D78
62340+:10AD80000A00159733E2003F0E0011FB020020215C
62341+:10AD90001440FFAA8FBF001C122000070000000013
62342+:10ADA0009259007D3330003F36020040A242007DC0
62343+:10ADB0000A00156A8FBF001C0E0010D100000000B1
62344+:10ADC0005040FF9E8FBF001C9259007D3330003FE2
62345+:10ADD0000A0015C636020040000000000000001BFB
62346+:10ADE0000000000F0000000A00000008000000063C
62347+:10ADF0000000000500000005000000040000000441
62348+:10AE00000000000300000003000000030000000336
62349+:10AE10000000000300000002000000020000000229
62350+:10AE2000000000020000000200000002000000021A
62351+:10AE3000000000020000000200000002000000020A
62352+:10AE400000000002000000020000000200000002FA
62353+:10AE50000000000100000001000000018008010066
62354+:10AE6000800800808008000000000C000000308096
62355+:10AE7000080011D00800127C08001294080012A8E3
62356+:10AE8000080012BC080011D0080011D0080012F010
62357+:10AE90000800132C080013400800138808001A8CBF
62358+:10AEA00008001A8C08001AC408001AC408001AD82E
62359+:10AEB00008001AA808001D0008001CCC08001D5836
62360+:10AEC00008001D5808001DE008001D108008024001
62361+:10AED000080027340800256C0800275C080027F4C8
62362+:10AEE0000800293C0800298808002AAC080029B479
62363+:10AEF00008002A38080025DC08002EDC08002EA4F3
62364+:10AF000008002588080025880800258808002B20CF
62365+:10AF100008002B20080025880800258808002DD06F
62366+:10AF2000080025880800258808002588080025884D
62367+:10AF300008002E0C080025880800258808002588B0
62368+:10AF4000080025880800258808002588080025882D
62369+:10AF5000080025880800258808002588080025881D
62370+:10AF6000080025880800258808002588080029A8E9
62371+:10AF7000080025880800258808002E680800258814
62372+:10AF800008002588080025880800258808002588ED
62373+:10AF900008002588080025880800258808002588DD
62374+:10AFA00008002588080025880800258808002588CD
62375+:10AFB00008002588080025880800258808002588BD
62376+:10AFC00008002CF4080025880800258808002C6853
62377+:10AFD00008002BC408003CE408003CB808003C848E
62378+:10AFE00008003C5808003C3808003BEC8008010091
62379+:10AFF00080080080800800008008008008004C6401
62380+:10B0000008004C9C08004BE408004C6408004C64A9
62381+:10B01000080049B808004C64080050500A000C842D
62382+:10B0200000000000000000000000000D7278703683
62383+:10B030002E322E31620000000602010300000000E3
62384+:10B0400000000001000000000000000000000000FF
62385+:10B0500000000000000000000000000000000000F0
62386+:10B0600000000000000000000000000000000000E0
62387+:10B0700000000000000000000000000000000000D0
62388+:10B0800000000000000000000000000000000000C0
62389+:10B0900000000000000000000000000000000000B0
62390+:10B0A00000000000000000000000000000000000A0
62391+:10B0B0000000000000000000000000000000000090
62392+:10B0C0000000000000000000000000000000000080
62393+:10B0D0000000000000000000000000000000000070
62394+:10B0E0000000000000000000000000000000000060
62395+:10B0F0000000000000000000000000000000000050
62396+:10B10000000000000000000000000000000000003F
62397+:10B11000000000000000000000000000000000002F
62398+:10B12000000000000000000000000000000000001F
62399+:10B13000000000000000000000000000000000000F
62400+:10B1400000000000000000000000000000000000FF
62401+:10B1500000000000000000000000000000000000EF
62402+:10B1600000000000000000000000000000000000DF
62403+:10B1700000000000000000000000000000000000CF
62404+:10B1800000000000000000000000000000000000BF
62405+:10B1900000000000000000000000000000000000AF
62406+:10B1A000000000000000000000000000000000009F
62407+:10B1B000000000000000000000000000000000008F
62408+:10B1C000000000000000000000000000000000007F
62409+:10B1D000000000000000000000000000000000006F
62410+:10B1E000000000000000000000000000000000005F
62411+:10B1F000000000000000000000000000000000004F
62412+:10B20000000000000000000000000000000000003E
62413+:10B21000000000000000000000000000000000002E
62414+:10B22000000000000000000000000000000000001E
62415+:10B23000000000000000000000000000000000000E
62416+:10B2400000000000000000000000000000000000FE
62417+:10B2500000000000000000000000000000000000EE
62418+:10B2600000000000000000000000000000000000DE
62419+:10B2700000000000000000000000000000000000CE
62420+:10B2800000000000000000000000000000000000BE
62421+:10B2900000000000000000000000000000000000AE
62422+:10B2A000000000000000000000000000000000009E
62423+:10B2B000000000000000000000000000000000008E
62424+:10B2C000000000000000000000000000000000007E
62425+:10B2D000000000000000000000000000000000006E
62426+:10B2E000000000000000000000000000000000005E
62427+:10B2F000000000000000000000000000000000004E
62428+:10B30000000000000000000000000000000000003D
62429+:10B31000000000000000000000000000000000002D
62430+:10B32000000000000000000000000000000000001D
62431+:10B33000000000000000000000000000000000000D
62432+:10B3400000000000000000000000000000000000FD
62433+:10B3500000000000000000000000000000000000ED
62434+:10B3600000000000000000000000000000000000DD
62435+:10B3700000000000000000000000000000000000CD
62436+:10B3800000000000000000000000000000000000BD
62437+:10B3900000000000000000000000000000000000AD
62438+:10B3A000000000000000000000000000000000009D
62439+:10B3B000000000000000000000000000000000008D
62440+:10B3C000000000000000000000000000000000007D
62441+:10B3D000000000000000000000000000000000006D
62442+:10B3E000000000000000000000000000000000005D
62443+:10B3F000000000000000000000000000000000004D
62444+:10B40000000000000000000000000000000000003C
62445+:10B41000000000000000000000000000000000002C
62446+:10B42000000000000000000000000000000000001C
62447+:10B43000000000000000000000000000000000000C
62448+:10B4400000000000000000000000000000000000FC
62449+:10B4500000000000000000000000000000000000EC
62450+:10B4600000000000000000000000000000000000DC
62451+:10B4700000000000000000000000000000000000CC
62452+:10B4800000000000000000000000000000000000BC
62453+:10B4900000000000000000000000000000000000AC
62454+:10B4A000000000000000000000000000000000009C
62455+:10B4B000000000000000000000000000000000008C
62456+:10B4C000000000000000000000000000000000007C
62457+:10B4D000000000000000000000000000000000006C
62458+:10B4E000000000000000000000000000000000005C
62459+:10B4F000000000000000000000000000000000004C
62460+:10B50000000000000000000000000000000000003B
62461+:10B51000000000000000000000000000000000002B
62462+:10B52000000000000000000000000000000000001B
62463+:10B53000000000000000000000000000000000000B
62464+:10B5400000000000000000000000000000000000FB
62465+:10B5500000000000000000000000000000000000EB
62466+:10B5600000000000000000000000000000000000DB
62467+:10B5700000000000000000000000000000000000CB
62468+:10B5800000000000000000000000000000000000BB
62469+:10B5900000000000000000000000000000000000AB
62470+:10B5A000000000000000000000000000000000009B
62471+:10B5B000000000000000000000000000000000008B
62472+:10B5C000000000000000000000000000000000007B
62473+:10B5D000000000000000000000000000000000006B
62474+:10B5E000000000000000000000000000000000005B
62475+:10B5F000000000000000000000000000000000004B
62476+:10B60000000000000000000000000000000000003A
62477+:10B61000000000000000000000000000000000002A
62478+:10B62000000000000000000000000000000000001A
62479+:10B63000000000000000000000000000000000000A
62480+:10B6400000000000000000000000000000000000FA
62481+:10B6500000000000000000000000000000000000EA
62482+:10B6600000000000000000000000000000000000DA
62483+:10B6700000000000000000000000000000000000CA
62484+:10B6800000000000000000000000000000000000BA
62485+:10B6900000000000000000000000000000000000AA
62486+:10B6A000000000000000000000000000000000009A
62487+:10B6B000000000000000000000000000000000008A
62488+:10B6C000000000000000000000000000000000007A
62489+:10B6D000000000000000000000000000000000006A
62490+:10B6E000000000000000000000000000000000005A
62491+:10B6F000000000000000000000000000000000004A
62492+:10B700000000000000000000000000000000000039
62493+:10B710000000000000000000000000000000000029
62494+:10B720000000000000000000000000000000000019
62495+:10B730000000000000000000000000000000000009
62496+:10B7400000000000000000000000000000000000F9
62497+:10B7500000000000000000000000000000000000E9
62498+:10B7600000000000000000000000000000000000D9
62499+:10B7700000000000000000000000000000000000C9
62500+:10B7800000000000000000000000000000000000B9
62501+:10B7900000000000000000000000000000000000A9
62502+:10B7A0000000000000000000000000000000000099
62503+:10B7B0000000000000000000000000000000000089
62504+:10B7C0000000000000000000000000000000000079
62505+:10B7D0000000000000000000000000000000000069
62506+:10B7E0000000000000000000000000000000000059
62507+:10B7F0000000000000000000000000000000000049
62508+:10B800000000000000000000000000000000000038
62509+:10B810000000000000000000000000000000000028
62510+:10B820000000000000000000000000000000000018
62511+:10B830000000000000000000000000000000000008
62512+:10B8400000000000000000000000000000000000F8
62513+:10B8500000000000000000000000000000000000E8
62514+:10B8600000000000000000000000000000000000D8
62515+:10B8700000000000000000000000000000000000C8
62516+:10B8800000000000000000000000000000000000B8
62517+:10B8900000000000000000000000000000000000A8
62518+:10B8A0000000000000000000000000000000000098
62519+:10B8B0000000000000000000000000000000000088
62520+:10B8C0000000000000000000000000000000000078
62521+:10B8D0000000000000000000000000000000000068
62522+:10B8E0000000000000000000000000000000000058
62523+:10B8F0000000000000000000000000000000000048
62524+:10B900000000000000000000000000000000000037
62525+:10B910000000000000000000000000000000000027
62526+:10B920000000000000000000000000000000000017
62527+:10B930000000000000000000000000000000000007
62528+:10B9400000000000000000000000000000000000F7
62529+:10B9500000000000000000000000000000000000E7
62530+:10B9600000000000000000000000000000000000D7
62531+:10B9700000000000000000000000000000000000C7
62532+:10B9800000000000000000000000000000000000B7
62533+:10B9900000000000000000000000000000000000A7
62534+:10B9A0000000000000000000000000000000000097
62535+:10B9B0000000000000000000000000000000000087
62536+:10B9C0000000000000000000000000000000000077
62537+:10B9D0000000000000000000000000000000000067
62538+:10B9E0000000000000000000000000000000000057
62539+:10B9F0000000000000000000000000000000000047
62540+:10BA00000000000000000000000000000000000036
62541+:10BA10000000000000000000000000000000000026
62542+:10BA20000000000000000000000000000000000016
62543+:10BA30000000000000000000000000000000000006
62544+:10BA400000000000000000000000000000000000F6
62545+:10BA500000000000000000000000000000000000E6
62546+:10BA600000000000000000000000000000000000D6
62547+:10BA700000000000000000000000000000000000C6
62548+:10BA800000000000000000000000000000000000B6
62549+:10BA900000000000000000000000000000000000A6
62550+:10BAA0000000000000000000000000000000000096
62551+:10BAB0000000000000000000000000000000000086
62552+:10BAC0000000000000000000000000000000000076
62553+:10BAD0000000000000000000000000000000000066
62554+:10BAE0000000000000000000000000000000000056
62555+:10BAF0000000000000000000000000000000000046
62556+:10BB00000000000000000000000000000000000035
62557+:10BB10000000000000000000000000000000000025
62558+:10BB20000000000000000000000000000000000015
62559+:10BB30000000000000000000000000000000000005
62560+:10BB400000000000000000000000000000000000F5
62561+:10BB500000000000000000000000000000000000E5
62562+:10BB600000000000000000000000000000000000D5
62563+:10BB700000000000000000000000000000000000C5
62564+:10BB800000000000000000000000000000000000B5
62565+:10BB900000000000000000000000000000000000A5
62566+:10BBA0000000000000000000000000000000000095
62567+:10BBB0000000000000000000000000000000000085
62568+:10BBC0000000000000000000000000000000000075
62569+:10BBD0000000000000000000000000000000000065
62570+:10BBE0000000000000000000000000000000000055
62571+:10BBF0000000000000000000000000000000000045
62572+:10BC00000000000000000000000000000000000034
62573+:10BC10000000000000000000000000000000000024
62574+:10BC20000000000000000000000000000000000014
62575+:10BC30000000000000000000000000000000000004
62576+:10BC400000000000000000000000000000000000F4
62577+:10BC500000000000000000000000000000000000E4
62578+:10BC600000000000000000000000000000000000D4
62579+:10BC700000000000000000000000000000000000C4
62580+:10BC800000000000000000000000000000000000B4
62581+:10BC900000000000000000000000000000000000A4
62582+:10BCA0000000000000000000000000000000000094
62583+:10BCB0000000000000000000000000000000000084
62584+:10BCC0000000000000000000000000000000000074
62585+:10BCD0000000000000000000000000000000000064
62586+:10BCE0000000000000000000000000000000000054
62587+:10BCF0000000000000000000000000000000000044
62588+:10BD00000000000000000000000000000000000033
62589+:10BD10000000000000000000000000000000000023
62590+:10BD20000000000000000000000000000000000013
62591+:10BD30000000000000000000000000000000000003
62592+:10BD400000000000000000000000000000000000F3
62593+:10BD500000000000000000000000000000000000E3
62594+:10BD600000000000000000000000000000000000D3
62595+:10BD700000000000000000000000000000000000C3
62596+:10BD800000000000000000000000000000000000B3
62597+:10BD900000000000000000000000000000000000A3
62598+:10BDA0000000000000000000000000000000000093
62599+:10BDB0000000000000000000000000000000000083
62600+:10BDC0000000000000000000000000000000000073
62601+:10BDD0000000000000000000000000000000000063
62602+:10BDE0000000000000000000000000000000000053
62603+:10BDF0000000000000000000000000000000000043
62604+:10BE00000000000000000000000000000000000032
62605+:10BE10000000000000000000000000000000000022
62606+:10BE20000000000000000000000000000000000012
62607+:10BE30000000000000000000000000000000000002
62608+:10BE400000000000000000000000000000000000F2
62609+:10BE500000000000000000000000000000000000E2
62610+:10BE600000000000000000000000000000000000D2
62611+:10BE700000000000000000000000000000000000C2
62612+:10BE800000000000000000000000000000000000B2
62613+:10BE900000000000000000000000000000000000A2
62614+:10BEA0000000000000000000000000000000000092
62615+:10BEB0000000000000000000000000000000000082
62616+:10BEC0000000000000000000000000000000000072
62617+:10BED0000000000000000000000000000000000062
62618+:10BEE0000000000000000000000000000000000052
62619+:10BEF0000000000000000000000000000000000042
62620+:10BF00000000000000000000000000000000000031
62621+:10BF10000000000000000000000000000000000021
62622+:10BF20000000000000000000000000000000000011
62623+:10BF30000000000000000000000000000000000001
62624+:10BF400000000000000000000000000000000000F1
62625+:10BF500000000000000000000000000000000000E1
62626+:10BF600000000000000000000000000000000000D1
62627+:10BF700000000000000000000000000000000000C1
62628+:10BF800000000000000000000000000000000000B1
62629+:10BF900000000000000000000000000000000000A1
62630+:10BFA0000000000000000000000000000000000091
62631+:10BFB0000000000000000000000000000000000081
62632+:10BFC0000000000000000000000000000000000071
62633+:10BFD0000000000000000000000000000000000061
62634+:10BFE0000000000000000000000000000000000051
62635+:10BFF0000000000000000000000000000000000041
62636+:10C000000000000000000000000000000000000030
62637+:10C010000000000000000000000000000000000020
62638+:10C020000000000000000000000000000000000010
62639+:10C030000000000000000000000000000000000000
62640+:10C0400000000000000000000000000000000000F0
62641+:10C0500000000000000000000000000000000000E0
62642+:10C0600000000000000000000000000000000000D0
62643+:10C0700000000000000000000000000000000000C0
62644+:10C0800000000000000000000000000000000000B0
62645+:10C0900000000000000000000000000000000000A0
62646+:10C0A0000000000000000000000000000000000090
62647+:10C0B0000000000000000000000000000000000080
62648+:10C0C0000000000000000000000000000000000070
62649+:10C0D0000000000000000000000000000000000060
62650+:10C0E0000000000000000000000000000000000050
62651+:10C0F0000000000000000000000000000000000040
62652+:10C10000000000000000000000000000000000002F
62653+:10C11000000000000000000000000000000000001F
62654+:10C12000000000000000000000000000000000000F
62655+:10C1300000000000000000000000000000000000FF
62656+:10C1400000000000000000000000000000000000EF
62657+:10C1500000000000000000000000000000000000DF
62658+:10C1600000000000000000000000000000000000CF
62659+:10C1700000000000000000000000000000000000BF
62660+:10C1800000000000000000000000000000000000AF
62661+:10C19000000000000000000000000000000000009F
62662+:10C1A000000000000000000000000000000000008F
62663+:10C1B000000000000000000000000000000000007F
62664+:10C1C000000000000000000000000000000000006F
62665+:10C1D000000000000000000000000000000000005F
62666+:10C1E000000000000000000000000000000000004F
62667+:10C1F000000000000000000000000000000000003F
62668+:10C20000000000000000000000000000000000002E
62669+:10C21000000000000000000000000000000000001E
62670+:10C22000000000000000000000000000000000000E
62671+:10C2300000000000000000000000000000000000FE
62672+:10C2400000000000000000000000000000000000EE
62673+:10C2500000000000000000000000000000000000DE
62674+:10C2600000000000000000000000000000000000CE
62675+:10C2700000000000000000000000000000000000BE
62676+:10C2800000000000000000000000000000000000AE
62677+:10C29000000000000000000000000000000000009E
62678+:10C2A000000000000000000000000000000000008E
62679+:10C2B000000000000000000000000000000000007E
62680+:10C2C000000000000000000000000000000000006E
62681+:10C2D000000000000000000000000000000000005E
62682+:10C2E000000000000000000000000000000000004E
62683+:10C2F000000000000000000000000000000000003E
62684+:10C30000000000000000000000000000000000002D
62685+:10C31000000000000000000000000000000000001D
62686+:10C32000000000000000000000000000000000000D
62687+:10C3300000000000000000000000000000000000FD
62688+:10C3400000000000000000000000000000000000ED
62689+:10C3500000000000000000000000000000000000DD
62690+:10C3600000000000000000000000000000000000CD
62691+:10C3700000000000000000000000000000000000BD
62692+:10C3800000000000000000000000000000000000AD
62693+:10C39000000000000000000000000000000000009D
62694+:10C3A000000000000000000000000000000000008D
62695+:10C3B000000000000000000000000000000000007D
62696+:10C3C000000000000000000000000000000000006D
62697+:10C3D000000000000000000000000000000000005D
62698+:10C3E000000000000000000000000000000000004D
62699+:10C3F000000000000000000000000000000000003D
62700+:10C40000000000000000000000000000000000002C
62701+:10C41000000000000000000000000000000000001C
62702+:10C42000000000000000000000000000000000000C
62703+:10C4300000000000000000000000000000000000FC
62704+:10C4400000000000000000000000000000000000EC
62705+:10C4500000000000000000000000000000000000DC
62706+:10C4600000000000000000000000000000000000CC
62707+:10C4700000000000000000000000000000000000BC
62708+:10C4800000000000000000000000000000000000AC
62709+:10C49000000000000000000000000000000000009C
62710+:10C4A000000000000000000000000000000000008C
62711+:10C4B000000000000000000000000000000000007C
62712+:10C4C000000000000000000000000000000000006C
62713+:10C4D000000000000000000000000000000000005C
62714+:10C4E000000000000000000000000000000000004C
62715+:10C4F000000000000000000000000000000000003C
62716+:10C50000000000000000000000000000000000002B
62717+:10C51000000000000000000000000000000000001B
62718+:10C52000000000000000000000000000000000000B
62719+:10C5300000000000000000000000000000000000FB
62720+:10C5400000000000000000000000000000000000EB
62721+:10C5500000000000000000000000000000000000DB
62722+:10C5600000000000000000000000000000000000CB
62723+:10C5700000000000000000000000000000000000BB
62724+:10C5800000000000000000000000000000000000AB
62725+:10C59000000000000000000000000000000000009B
62726+:10C5A000000000000000000000000000000000008B
62727+:10C5B000000000000000000000000000000000007B
62728+:10C5C000000000000000000000000000000000006B
62729+:10C5D000000000000000000000000000000000005B
62730+:10C5E000000000000000000000000000000000004B
62731+:10C5F000000000000000000000000000000000003B
62732+:10C60000000000000000000000000000000000002A
62733+:10C61000000000000000000000000000000000001A
62734+:10C62000000000000000000000000000000000000A
62735+:10C6300000000000000000000000000000000000FA
62736+:10C6400000000000000000000000000000000000EA
62737+:10C6500000000000000000000000000000000000DA
62738+:10C6600000000000000000000000000000000000CA
62739+:10C6700000000000000000000000000000000000BA
62740+:10C6800000000000000000000000000000000000AA
62741+:10C69000000000000000000000000000000000009A
62742+:10C6A000000000000000000000000000000000008A
62743+:10C6B000000000000000000000000000000000007A
62744+:10C6C000000000000000000000000000000000006A
62745+:10C6D000000000000000000000000000000000005A
62746+:10C6E000000000000000000000000000000000004A
62747+:10C6F000000000000000000000000000000000003A
62748+:10C700000000000000000000000000000000000029
62749+:10C710000000000000000000000000000000000019
62750+:10C720000000000000000000000000000000000009
62751+:10C7300000000000000000000000000000000000F9
62752+:10C7400000000000000000000000000000000000E9
62753+:10C7500000000000000000000000000000000000D9
62754+:10C7600000000000000000000000000000000000C9
62755+:10C7700000000000000000000000000000000000B9
62756+:10C7800000000000000000000000000000000000A9
62757+:10C790000000000000000000000000000000000099
62758+:10C7A0000000000000000000000000000000000089
62759+:10C7B0000000000000000000000000000000000079
62760+:10C7C0000000000000000000000000000000000069
62761+:10C7D0000000000000000000000000000000000059
62762+:10C7E0000000000000000000000000000000000049
62763+:10C7F0000000000000000000000000000000000039
62764+:10C800000000000000000000000000000000000028
62765+:10C810000000000000000000000000000000000018
62766+:10C820000000000000000000000000000000000008
62767+:10C8300000000000000000000000000000000000F8
62768+:10C8400000000000000000000000000000000000E8
62769+:10C8500000000000000000000000000000000000D8
62770+:10C8600000000000000000000000000000000000C8
62771+:10C8700000000000000000000000000000000000B8
62772+:10C8800000000000000000000000000000000000A8
62773+:10C890000000000000000000000000000000000098
62774+:10C8A0000000000000000000000000000000000088
62775+:10C8B0000000000000000000000000000000000078
62776+:10C8C0000000000000000000000000000000000068
62777+:10C8D0000000000000000000000000000000000058
62778+:10C8E0000000000000000000000000000000000048
62779+:10C8F0000000000000000000000000000000000038
62780+:10C900000000000000000000000000000000000027
62781+:10C910000000000000000000000000000000000017
62782+:10C920000000000000000000000000000000000007
62783+:10C9300000000000000000000000000000000000F7
62784+:10C9400000000000000000000000000000000000E7
62785+:10C9500000000000000000000000000000000000D7
62786+:10C9600000000000000000000000000000000000C7
62787+:10C9700000000000000000000000000000000000B7
62788+:10C9800000000000000000000000000000000000A7
62789+:10C990000000000000000000000000000000000097
62790+:10C9A0000000000000000000000000000000000087
62791+:10C9B0000000000000000000000000000000000077
62792+:10C9C0000000000000000000000000000000000067
62793+:10C9D0000000000000000000000000000000000057
62794+:10C9E0000000000000000000000000000000000047
62795+:10C9F0000000000000000000000000000000000037
62796+:10CA00000000000000000000000000000000000026
62797+:10CA10000000000000000000000000000000000016
62798+:10CA20000000000000000000000000000000000006
62799+:10CA300000000000000000000000000000000000F6
62800+:10CA400000000000000000000000000000000000E6
62801+:10CA500000000000000000000000000000000000D6
62802+:10CA600000000000000000000000000000000000C6
62803+:10CA700000000000000000000000000000000000B6
62804+:10CA800000000000000000000000000000000000A6
62805+:10CA90000000000000000000000000000000000096
62806+:10CAA0000000000000000000000000000000000086
62807+:10CAB0000000000000000000000000000000000076
62808+:10CAC0000000000000000000000000000000000066
62809+:10CAD0000000000000000000000000000000000056
62810+:10CAE0000000000000000000000000000000000046
62811+:10CAF0000000000000000000000000000000000036
62812+:10CB00000000000000000000000000000000000025
62813+:10CB10000000000000000000000000000000000015
62814+:10CB20000000000000000000000000000000000005
62815+:10CB300000000000000000000000000000000000F5
62816+:10CB400000000000000000000000000000000000E5
62817+:10CB500000000000000000000000000000000000D5
62818+:10CB600000000000000000000000000000000000C5
62819+:10CB700000000000000000000000000000000000B5
62820+:10CB800000000000000000000000000000000000A5
62821+:10CB90000000000000000000000000000000000095
62822+:10CBA0000000000000000000000000000000000085
62823+:10CBB0000000000000000000000000000000000075
62824+:10CBC0000000000000000000000000000000000065
62825+:10CBD0000000000000000000000000000000000055
62826+:10CBE0000000000000000000000000000000000045
62827+:10CBF0000000000000000000000000000000000035
62828+:10CC00000000000000000000000000000000000024
62829+:10CC10000000000000000000000000000000000014
62830+:10CC20000000000000000000000000000000000004
62831+:10CC300000000000000000000000000000000000F4
62832+:10CC400000000000000000000000000000000000E4
62833+:10CC500000000000000000000000000000000000D4
62834+:10CC600000000000000000000000000000000000C4
62835+:10CC700000000000000000000000000000000000B4
62836+:10CC800000000000000000000000000000000000A4
62837+:10CC90000000000000000000000000000000000094
62838+:10CCA0000000000000000000000000000000000084
62839+:10CCB0000000000000000000000000000000000074
62840+:10CCC0000000000000000000000000000000000064
62841+:10CCD0000000000000000000000000000000000054
62842+:10CCE0000000000000000000000000000000000044
62843+:10CCF0000000000000000000000000000000000034
62844+:10CD00000000000000000000000000000000000023
62845+:10CD10000000000000000000000000000000000013
62846+:10CD20000000000000000000000000000000000003
62847+:10CD300000000000000000000000000000000000F3
62848+:10CD400000000000000000000000000000000000E3
62849+:10CD500000000000000000000000000000000000D3
62850+:10CD600000000000000000000000000000000000C3
62851+:10CD700000000000000000000000000000000000B3
62852+:10CD800000000000000000000000000000000000A3
62853+:10CD90000000000000000000000000000000000093
62854+:10CDA0000000000000000000000000000000000083
62855+:10CDB0000000000000000000000000000000000073
62856+:10CDC0000000000000000000000000000000000063
62857+:10CDD0000000000000000000000000000000000053
62858+:10CDE0000000000000000000000000000000000043
62859+:10CDF0000000000000000000000000000000000033
62860+:10CE00000000000000000000000000000000000022
62861+:10CE10000000000000000000000000000000000012
62862+:10CE20000000000000000000000000000000000002
62863+:10CE300000000000000000000000000000000000F2
62864+:10CE400000000000000000000000000000000000E2
62865+:10CE500000000000000000000000000000000000D2
62866+:10CE600000000000000000000000000000000000C2
62867+:10CE700000000000000000000000000000000000B2
62868+:10CE800000000000000000000000000000000000A2
62869+:10CE90000000000000000000000000000000000092
62870+:10CEA0000000000000000000000000000000000082
62871+:10CEB0000000000000000000000000000000000072
62872+:10CEC0000000000000000000000000000000000062
62873+:10CED0000000000000000000000000000000000052
62874+:10CEE0000000000000000000000000000000000042
62875+:10CEF0000000000000000000000000000000000032
62876+:10CF00000000000000000000000000000000000021
62877+:10CF10000000000000000000000000000000000011
62878+:10CF20000000000000000000000000000000000001
62879+:10CF300000000000000000000000000000000000F1
62880+:10CF400000000000000000000000000000000000E1
62881+:10CF500000000000000000000000000000000000D1
62882+:10CF600000000000000000000000000000000000C1
62883+:10CF700000000000000000000000000000000000B1
62884+:10CF800000000000000000000000000000000000A1
62885+:10CF90000000000000000000000000000000000091
62886+:10CFA0000000000000000000000000000000000081
62887+:10CFB0000000000000000000000000000000000071
62888+:10CFC0000000000000000000000000000000000061
62889+:10CFD0000000000000000000000000000000000051
62890+:10CFE0000000000000000000000000000000000041
62891+:10CFF0000000000000000000000000000000000031
62892+:10D000000000000000000000000000000000000020
62893+:10D010000000000000000000000000000000000010
62894+:10D020000000000000000000000000000000000000
62895+:10D0300000000000000000000000000000000000F0
62896+:10D0400000000000000000000000000000000000E0
62897+:10D0500000000000000000000000000000000000D0
62898+:10D0600000000000000000000000000000000000C0
62899+:10D0700000000000000000000000000000000000B0
62900+:10D0800000000000000000000000000000000000A0
62901+:10D090000000000000000000000000000000000090
62902+:10D0A0000000000000000000000000000000000080
62903+:10D0B0000000000000000000000000000000000070
62904+:10D0C0000000000000000000000000000000000060
62905+:10D0D0000000000000000000000000000000000050
62906+:10D0E0000000000000000000000000000000000040
62907+:10D0F0000000000000000000000000000000000030
62908+:10D10000000000000000000000000000000000001F
62909+:10D11000000000000000000000000000000000000F
62910+:10D1200000000000000000000000000000000000FF
62911+:10D1300000000000000000000000000000000000EF
62912+:10D1400000000000000000000000000000000000DF
62913+:10D1500000000000000000000000000000000000CF
62914+:10D1600000000000000000000000000000000000BF
62915+:10D1700000000000000000000000000000000000AF
62916+:10D18000000000000000000000000000000000009F
62917+:10D19000000000000000000000000000000000008F
62918+:10D1A000000000000000000000000000000000007F
62919+:10D1B000000000000000000000000000000000006F
62920+:10D1C000000000000000000000000000000000005F
62921+:10D1D000000000000000000000000000000000004F
62922+:10D1E000000000000000000000000000000000003F
62923+:10D1F000000000000000000000000000000000002F
62924+:10D20000000000000000000000000000000000001E
62925+:10D21000000000000000000000000000000000000E
62926+:10D2200000000000000000000000000000000000FE
62927+:10D2300000000000000000000000000000000000EE
62928+:10D2400000000000000000000000000000000000DE
62929+:10D2500000000000000000000000000000000000CE
62930+:10D2600000000000000000000000000000000000BE
62931+:10D2700000000000000000000000000000000000AE
62932+:10D28000000000000000000000000000000000009E
62933+:10D29000000000000000000000000000000000008E
62934+:10D2A000000000000000000000000000000000007E
62935+:10D2B000000000000000000000000000000000006E
62936+:10D2C000000000000000000000000000000000005E
62937+:10D2D000000000000000000000000000000000004E
62938+:10D2E000000000000000000000000000000000003E
62939+:10D2F000000000000000000000000000000000002E
62940+:10D30000000000000000000000000000000000001D
62941+:10D31000000000000000000000000000000000000D
62942+:10D3200000000000000000000000000000000000FD
62943+:10D3300000000000000000000000000000000000ED
62944+:10D3400000000000000000000000000000000000DD
62945+:10D3500000000000000000000000000000000000CD
62946+:10D3600000000000000000000000000000000000BD
62947+:10D3700000000000000000000000000000000000AD
62948+:10D38000000000000000000000000000000000009D
62949+:10D39000000000000000000000000000000000008D
62950+:10D3A000000000000000000000000000000000007D
62951+:10D3B000000000000000000000000000000000006D
62952+:10D3C000000000000000000000000000000000005D
62953+:10D3D000000000000000000000000000000000004D
62954+:10D3E000000000000000000000000000000000003D
62955+:10D3F000000000000000000000000000000000002D
62956+:10D40000000000000000000000000000000000001C
62957+:10D41000000000000000000000000000000000000C
62958+:10D4200000000000000000000000000000000000FC
62959+:10D4300000000000000000000000000000000000EC
62960+:10D4400000000000000000000000000000000000DC
62961+:10D4500000000000000000000000000000000000CC
62962+:10D4600000000000000000000000000000000000BC
62963+:10D4700000000000000000000000000000000000AC
62964+:10D48000000000000000000000000000000000009C
62965+:10D49000000000000000000000000000000000008C
62966+:10D4A000000000000000000000000000000000007C
62967+:10D4B000000000000000000000000000000000006C
62968+:10D4C000000000000000000000000000000000005C
62969+:10D4D000000000000000000000000000000000004C
62970+:10D4E000000000000000000000000000000000003C
62971+:10D4F000000000000000000000000000000000002C
62972+:10D50000000000000000000000000000000000001B
62973+:10D51000000000000000000000000000000000000B
62974+:10D5200000000000000000000000000000000000FB
62975+:10D5300000000000000000000000000000000000EB
62976+:10D5400000000000000000000000000000000000DB
62977+:10D5500000000000000000000000000000000000CB
62978+:10D5600000000000000000000000000000000000BB
62979+:10D5700000000000000000000000000000000000AB
62980+:10D58000000000000000000000000000000000009B
62981+:10D59000000000000000000000000000000000008B
62982+:10D5A000000000000000000000000000000000007B
62983+:10D5B000000000000000000000000000000000006B
62984+:10D5C000000000000000000000000000000000005B
62985+:10D5D000000000000000000000000000000000004B
62986+:10D5E000000000000000000000000000000000003B
62987+:10D5F000000000000000000000000000000000002B
62988+:10D60000000000000000000000000000000000001A
62989+:10D61000000000000000000000000000000000000A
62990+:10D6200000000000000000000000000000000000FA
62991+:10D6300000000000000000000000000000000000EA
62992+:10D6400000000000000000000000000000000000DA
62993+:10D6500000000000000000000000000000000000CA
62994+:10D6600000000000000000000000000000000000BA
62995+:10D6700000000000000000000000000000000000AA
62996+:10D68000000000000000000000000000000000009A
62997+:10D69000000000000000000000000000000000008A
62998+:10D6A000000000000000000000000000000000007A
62999+:10D6B000000000000000000000000000000000006A
63000+:10D6C000000000000000000000000000000000005A
63001+:10D6D000000000000000000000000000000000004A
63002+:10D6E000000000000000000000000000000000003A
63003+:10D6F000000000000000000000000000000000002A
63004+:10D700000000000000000000000000000000000019
63005+:10D710000000000000000000000000000000000009
63006+:10D7200000000000000000000000000000000000F9
63007+:10D7300000000000000000000000000000000000E9
63008+:10D7400000000000000000000000000000000000D9
63009+:10D7500000000000000000000000000000000000C9
63010+:10D7600000000000000000000000000000000000B9
63011+:10D7700000000000000000000000000000000000A9
63012+:10D780000000000000000000000000000000000099
63013+:10D790000000000000000000000000000000000089
63014+:10D7A0000000000000000000000000000000000079
63015+:10D7B0000000000000000000000000000000000069
63016+:10D7C0000000000000000000000000000000000059
63017+:10D7D0000000000000000000000000000000000049
63018+:10D7E0000000000000000000000000000000000039
63019+:10D7F0000000000000000000000000000000000029
63020+:10D800000000000000000000000000000000000018
63021+:10D810000000000000000000000000000000000008
63022+:10D8200000000000000000000000000000000000F8
63023+:10D8300000000000000000000000000000000000E8
63024+:10D8400000000000000000000000000000000000D8
63025+:10D8500000000000000000000000000000000000C8
63026+:10D8600000000000000000000000000000000000B8
63027+:10D8700000000000000000000000000000000000A8
63028+:10D880000000000000000000000000000000000098
63029+:10D890000000000000000000000000000000000088
63030+:10D8A0000000000000000000000000000000000078
63031+:10D8B0000000000000000000000000000000000068
63032+:10D8C0000000000000000000000000000000000058
63033+:10D8D0000000000000000000000000000000000048
63034+:10D8E0000000000000000000000000000000000038
63035+:10D8F0000000000000000000000000000000000028
63036+:10D900000000000000000000000000000000000017
63037+:10D910000000000000000000000000000000000007
63038+:10D9200000000000000000000000000000000000F7
63039+:10D9300000000000000000000000000000000000E7
63040+:10D9400000000000000000000000000000000000D7
63041+:10D9500000000000000000000000000000000000C7
63042+:10D9600000000000000000000000000000000000B7
63043+:10D9700000000000000000000000000000000000A7
63044+:10D980000000000000000000000000000000000097
63045+:10D990000000000000000000000000000000000087
63046+:10D9A0000000000000000000000000000000000077
63047+:10D9B0000000000000000000000000000000000067
63048+:10D9C0000000000000000000000000000000000057
63049+:10D9D0000000000000000000000000000000000047
63050+:10D9E0000000000000000000000000000000000037
63051+:10D9F0000000000000000000000000000000000027
63052+:10DA00000000000000000000000000000000000016
63053+:10DA10000000000000000000000000000000000006
63054+:10DA200000000000000000000000000000000000F6
63055+:10DA300000000000000000000000000000000000E6
63056+:10DA400000000000000000000000000000000000D6
63057+:10DA500000000000000000000000000000000000C6
63058+:10DA600000000000000000000000000000000000B6
63059+:10DA700000000000000000000000000000000000A6
63060+:10DA80000000000000000000000000000000000096
63061+:10DA90000000000000000000000000000000000086
63062+:10DAA0000000000000000000000000000000000076
63063+:10DAB0000000000000000000000000000000000066
63064+:10DAC0000000000000000000000000000000000056
63065+:10DAD0000000000000000000000000000000000046
63066+:10DAE0000000000000000000000000000000000036
63067+:10DAF0000000000000000000000000000000000026
63068+:10DB00000000000000000000000000000000000015
63069+:10DB10000000000000000000000000000000000005
63070+:10DB200000000000000000000000000000000000F5
63071+:10DB300000000000000000000000000000000000E5
63072+:10DB400000000000000000000000000000000000D5
63073+:10DB500000000000000000000000000000000000C5
63074+:10DB600000000000000000000000000000000000B5
63075+:10DB700000000000000000000000000000000000A5
63076+:10DB80000000000000000000000000000000000095
63077+:10DB90000000000000000000000000000000000085
63078+:10DBA0000000000000000000000000000000000075
63079+:10DBB0000000000000000000000000000000000065
63080+:10DBC0000000000000000000000000000000000055
63081+:10DBD0000000000000000000000000000000000045
63082+:10DBE0000000000000000000000000000000000035
63083+:10DBF0000000000000000000000000000000000025
63084+:10DC00000000000000000000000000000000000014
63085+:10DC10000000000000000000000000000000000004
63086+:10DC200000000000000000000000000000000000F4
63087+:10DC300000000000000000000000000000000000E4
63088+:10DC400000000000000000000000000000000000D4
63089+:10DC500000000000000000000000000000000000C4
63090+:10DC600000000000000000000000000000000000B4
63091+:10DC700000000000000000000000000000000000A4
63092+:10DC80000000000000000000000000000000000094
63093+:10DC90000000000000000000000000000000000084
63094+:10DCA0000000000000000000000000000000000074
63095+:10DCB0000000000000000000000000000000000064
63096+:10DCC0000000000000000000000000000000000054
63097+:10DCD0000000000000000000000000000000000044
63098+:10DCE0000000000000000000000000000000000034
63099+:10DCF0000000000000000000000000000000000024
63100+:10DD00000000000000000000000000000000000013
63101+:10DD10000000000000000000000000000000000003
63102+:10DD200000000000000000000000000000000000F3
63103+:10DD300000000000000000000000000000000000E3
63104+:10DD400000000000000000000000000000000000D3
63105+:10DD500000000000000000000000000000000000C3
63106+:10DD600000000000000000000000000000000000B3
63107+:10DD700000000000000000000000000000000000A3
63108+:10DD80000000000000000000000000000000000093
63109+:10DD90000000000000000000000000000000000083
63110+:10DDA0000000000000000000000000000000000073
63111+:10DDB0000000000000000000000000000000000063
63112+:10DDC0000000000000000000000000000000000053
63113+:10DDD0000000000000000000000000000000000043
63114+:10DDE0000000000000000000000000000000000033
63115+:10DDF0000000000000000000000000000000000023
63116+:10DE00000000000000000000000000000000000012
63117+:10DE10000000000000000000000000000000000002
63118+:10DE200000000000000000000000000000000000F2
63119+:10DE300000000000000000000000000000000000E2
63120+:10DE400000000000000000000000000000000000D2
63121+:10DE500000000000000000000000000000000000C2
63122+:10DE600000000000000000000000000000000000B2
63123+:10DE700000000000000000000000000000000000A2
63124+:10DE80000000000000000000000000000000000092
63125+:10DE90000000000000000000000000000000000082
63126+:10DEA0000000000000000000000000000000000072
63127+:10DEB0000000000000000000000000000000000062
63128+:10DEC0000000000000000000000000000000000052
63129+:10DED0000000000000000000000000000000000042
63130+:10DEE0000000000000000000000000000000000032
63131+:10DEF0000000000000000000000000000000000022
63132+:10DF00000000000000000000000000000000000011
63133+:10DF10000000000000000000000000000000000001
63134+:10DF200000000000000000000000000000000000F1
63135+:10DF300000000000000000000000000000000000E1
63136+:10DF400000000000000000000000000000000000D1
63137+:10DF500000000000000000000000000000000000C1
63138+:10DF600000000000000000000000000000000000B1
63139+:10DF700000000000000000000000000000000000A1
63140+:10DF80000000000000000000000000000000000091
63141+:10DF90000000000000000000000000000000000081
63142+:10DFA0000000000000000000000000000000000071
63143+:10DFB0000000000000000000000000000000000061
63144+:10DFC0000000000000000000000000000000000051
63145+:10DFD0000000000000000000000000000000000041
63146+:10DFE0000000000000000000000000000000000031
63147+:10DFF0000000000000000000000000000000000021
63148+:10E000000000000000000000000000000000000010
63149+:10E010000000000000000000000000000000000000
63150+:10E0200000000000000000000000000000000000F0
63151+:10E0300000000000000000000000000000000000E0
63152+:10E0400000000000000000000000000000000000D0
63153+:10E0500000000000000000000000000000000000C0
63154+:10E0600000000000000000000000000000000000B0
63155+:10E0700000000000000000000000000000000000A0
63156+:10E080000000000000000000000000000000000090
63157+:10E090000000000000000000000000000000000080
63158+:10E0A0000000000000000000000000000000000070
63159+:10E0B0000000000000000000000000000000000060
63160+:10E0C0000000000000000000000000000000000050
63161+:10E0D0000000000000000000000000000000000040
63162+:10E0E0000000000000000000000000000000000030
63163+:10E0F0000000000000000000000000000000000020
63164+:10E10000000000000000000000000000000000000F
63165+:10E1100000000000000000000000000000000000FF
63166+:10E1200000000000000000000000000000000000EF
63167+:10E1300000000000000000000000000000000000DF
63168+:10E1400000000000000000000000000000000000CF
63169+:10E1500000000000000000000000000000000000BF
63170+:10E1600000000000000000000000000000000000AF
63171+:10E17000000000000000000000000000000000009F
63172+:10E18000000000000000000000000000000000008F
63173+:10E19000000000000000000000000000000000007F
63174+:10E1A000000000000000000000000000000000006F
63175+:10E1B000000000000000000000000000000000005F
63176+:10E1C000000000000000000000000000000000004F
63177+:10E1D000000000000000000000000000000000003F
63178+:10E1E000000000000000000000000000000000002F
63179+:10E1F000000000000000000000000000000000809F
63180+:10E20000000000000000000000000000000000000E
63181+:10E2100000000000000000000000000000000000FE
63182+:10E220000000000A000000000000000000000000E4
63183+:10E2300010000003000000000000000D0000000DB1
63184+:10E240003C020801244295C03C030801246397FC6A
63185+:10E25000AC4000000043202B1480FFFD244200044A
63186+:10E260003C1D080037BD9FFC03A0F0213C100800B6
63187+:10E27000261032103C1C0801279C95C00E0012BECF
63188+:10E28000000000000000000D3C02800030A5FFFFF0
63189+:10E2900030C600FF344301803C0880008D0901B87E
63190+:10E2A0000520FFFE00000000AC6400002404000212
63191+:10E2B000A4650008A066000AA064000BAC67001803
63192+:10E2C0003C03100003E00008AD0301B83C0560000A
63193+:10E2D0008CA24FF80440FFFE00000000ACA44FC029
63194+:10E2E0003C0310003C040200ACA44FC403E000084F
63195+:10E2F000ACA34FF89486000C00A050212488001491
63196+:10E3000000062B0200051080004448210109182B4B
63197+:10E310001060001100000000910300002C6400094F
63198+:10E320005080000991190001000360803C0D080134
63199+:10E3300025AD9258018D58218D67000000E000083E
63200+:10E340000000000091190001011940210109302B42
63201+:10E3500054C0FFF29103000003E000080000102108
63202+:10E360000A000CCC25080001910F0001240E000AC0
63203+:10E3700015EE00400128C8232F38000A1700003D81
63204+:10E38000250D00028D580000250F0006370E0100F4
63205+:10E39000AD4E0000910C000291AB000191A400026F
63206+:10E3A00091A60003000C2E00000B3C0000A71025D6
63207+:10E3B00000041A000043C8250326C025AD580004F8
63208+:10E3C000910E000691ED000191E7000291E5000336
63209+:10E3D000000E5E00000D6400016C30250007220075
63210+:10E3E00000C41025004518252508000A0A000CCC99
63211+:10E3F000AD430008910F000125040002240800022B
63212+:10E4000055E80001012020210A000CCC00804021A9
63213+:10E41000910C0001240B0003158B00160000000076
63214+:10E420008D580000910E000225080003370D0008EA
63215+:10E43000A14E00100A000CCCAD4D00009119000156
63216+:10E44000240F0004172F000B0000000091070002AA
63217+:10E45000910400038D43000000072A0000A410254A
63218+:10E460003466000425080004AD42000C0A000CCC00
63219+:10E47000AD46000003E000082402000127BDFFE8CC
63220+:10E48000AFBF0014AFB000100E00164E0080802108
63221+:10E490003C0480083485008090A600052403FFFE1C
63222+:10E4A0000200202100C310248FBF00148FB0001081
63223+:10E4B000A0A200050A00165827BD001827BDFFE8D6
63224+:10E4C000AFB00010AFBF00140E000FD40080802149
63225+:10E4D0003C06800834C5008090A40000240200504F
63226+:10E4E000308300FF106200073C09800002002021F9
63227+:10E4F0008FBF00148FB00010AD2001800A00108F74
63228+:10E5000027BD0018240801003C07800002002021DC
63229+:10E510008FBF00148FB00010ACE801800A00108F8C
63230+:10E5200027BD001827BDFF783C058008AFBE0080DE
63231+:10E53000AFB7007CAFB3006CAFB10064AFBF008475
63232+:10E54000AFB60078AFB50074AFB40070AFB200687A
63233+:10E55000AFB0006034A600803C0580008CB201287A
63234+:10E5600090C400098CA701043C020001309100FF17
63235+:10E5700000E218240000B8210000F021106000071C
63236+:10E58000000098213C0908008D2931F02413000176
63237+:10E59000252800013C010800AC2831F0ACA0008423
63238+:10E5A00090CC0005000C5827316A0001154000721C
63239+:10E5B000AFA0005090CD00002406002031A400FF41
63240+:10E5C00010860018240E0050108E009300000000EA
63241+:10E5D0003C1008008E1000DC260F00013C010800F2
63242+:10E5E000AC2F00DC0E0016C7000000000040182110
63243+:10E5F0008FBF00848FBE00808FB7007C8FB60078FD
63244+:10E600008FB500748FB400708FB3006C8FB2006848
63245+:10E610008FB100648FB000600060102103E000083B
63246+:10E6200027BD00880000000D3C1F8000AFA0003017
63247+:10E6300097E501168FE201043C04002030B9FFFF8A
63248+:10E64000004438240007182B00033140AFA60030E7
63249+:10E650008FF5010437F80C003C1600400338802188
63250+:10E6600002B6A02434C40040128000479215000D69
63251+:10E6700032A800201500000234860080008030217E
63252+:10E6800014C0009FAFA600303C0D800835A6008066
63253+:10E6900090CC0008318B0040516000063C06800899
63254+:10E6A000240E0004122E00A8240F0012122F003294
63255+:10E6B0003C06800834C401003C0280009447011AE3
63256+:10E6C0009619000E909F00088E18000830E3FFFF97
63257+:10E6D00003F9B00432B40004AFB6005CAFA3005835
63258+:10E6E0008E1600041280002EAFB8005434C3008090
63259+:10E6F000906800083105004014A0002500000000CB
63260+:10E700008C70005002D090230640000500000000ED
63261+:10E710008C71003402D1A82306A201678EE20008A2
63262+:10E72000126000063C1280003C1508008EB531F4E2
63263+:10E7300026B600013C010800AC3631F4AE4000447E
63264+:10E74000240300018FBF00848FBE00808FB7007C40
63265+:10E750008FB600788FB500748FB400708FB3006CE3
63266+:10E760008FB200688FB100648FB00060006010212C
63267+:10E7700003E0000827BD00880E000D2800002021BE
63268+:10E780000A000D75004018210A000D9500C02021D7
63269+:10E790000E00171702C020211440FFE10000000006
63270+:10E7A0003C0B8008356400808C8A003402CA482300
63271+:10E7B0000520001D000000003C1E08008FDE310017
63272+:10E7C00027D700013C010800AC3731001260000679
63273+:10E7D000024020213C1408008E9431F42690000160
63274+:10E7E0003C010800AC3031F40E00164E3C1E80088F
63275+:10E7F00037CD008091B700250240202136EE00047D
63276+:10E800000E001658A1AE00250E000CAC02402021CF
63277+:10E810000A000DCA240300013C17080126F796C020
63278+:10E820000A000D843C1F80008C86003002C66023E5
63279+:10E830001980000C2419000C908F004F3C14080024
63280+:10E840008E94310032B500FC35ED0001268E0001BA
63281+:10E850003C010800AC2E3100A08D004FAFA0005845
63282+:10E860002419000CAFB900308C9800300316A02397
63283+:10E870001A80010B8FA300580074F82A17E0FFD309
63284+:10E88000000000001074002A8FA5005802D4B021A7
63285+:10E8900000B410233044FFFFAFA4005832A8000298
63286+:10E8A0001100002E32AB00103C15800836B00080FD
63287+:10E8B0009216000832D30040526000FB8EE200083E
63288+:10E8C0000E00164E02402021240A0018A20A000958
63289+:10E8D000921100052409FFFE024020210229902404
63290+:10E8E0000E001658A2120005240400390000282149
63291+:10E8F0000E0016F2240600180A000DCA24030001B7
63292+:10E9000092FE000C3C0A800835490080001EBB00C6
63293+:10E910008D27003836F10081024020213225F08118
63294+:10E920000E000C9B30C600FF0A000DC10000000065
63295+:10E930003AA7000130E300011460FFA402D4B02123
63296+:10E940000A000E1D00000000024020210E001734B6
63297+:10E95000020028210A000D75004018211160FF7087
63298+:10E960003C0F80083C0D800835EE00808DC40038D7
63299+:10E970008FA300548DA60004006660231D80FF68ED
63300+:10E98000000000000064C02307020001AFA400548F
63301+:10E990003C1F08008FFF31E433F9000113200015FC
63302+:10E9A0008FAC00583C07800094E3011A10600012FD
63303+:10E9B0003C0680080E00216A024020213C03080129
63304+:10E9C000906396F13064000214800145000000005D
63305+:10E9D000306C0004118000078FAC0058306600FBDB
63306+:10E9E0003C010801A02696F132B500FCAFA000580A
63307+:10E9F0008FAC00583C06800834D30080AFB40018B8
63308+:10EA0000AFB60010AFAC00143C088000950B01209D
63309+:10EA10008E6F0030966A005C8FA3005C8FBF003061
63310+:10EA20003169FFFF3144FFFF8FAE005401341021E4
63311+:10EA3000350540000064382B0045C82103E7C02598
63312+:10EA4000AFB90020AFAF0028AFB80030AFAF00249F
63313+:10EA5000AFA0002CAFAE0034926D000831B40008B6
63314+:10EA6000168000BB020020218EE200040040F8095D
63315+:10EA700027A400108FAF003031F300025660000170
63316+:10EA800032B500FE3C048008349F008093F90008F2
63317+:10EA900033380040530000138FA400248C850004F9
63318+:10EAA0008FA7005410A700D52404001432B0000131
63319+:10EAB0001200000C8FA400242414000C1234011A3C
63320+:10EAC0002A2D000D11A001022413000E240E000AAD
63321+:10EAD000522E0001241E00088FAF002425E40001FF
63322+:10EAE000AFA400248FAA00143C0B80083565008079
63323+:10EAF000008A48218CB10030ACA9003090A4004EAF
63324+:10EB00008CA700303408FFFF0088180400E3F821C8
63325+:10EB1000ACBF00348FA600308FB900548FB8005CB2
63326+:10EB200030C200081040000B033898218CAC002044
63327+:10EB3000119300D330C600FF92EE000C8FA7003473
63328+:10EB400002402021000E6B0035B400800E000C9BAB
63329+:10EB50003285F0803C028008345000808E0F0030F7
63330+:10EB600001F1302318C00097264800803C070800B8
63331+:10EB70008CE731E42404FF80010418243118007F5D
63332+:10EB80003C1F80003C19800430F10001AFE300908D
63333+:10EB900012200006031928213C030801906396F116
63334+:10EBA00030690008152000C6306A00F73C10800864
63335+:10EBB00036040080908C004F318B000115600042BC
63336+:10EBC000000000003C0608008CC6319830CE0010D2
63337+:10EBD00051C0004230F9000190AF006B55E0003F9A
63338+:10EBE00030F9000124180001A0B8006B3C1180002E
63339+:10EBF0009622007A24470064A48700123C0D800806
63340+:10EC000035A5008090B40008329000401600000442
63341+:10EC10003C03800832AE000115C0008B00000000EC
63342+:10EC2000346400808C86002010D3000A3463010015
63343+:10EC30008C67000002C7782319E000978FBF00544B
63344+:10EC4000AC93002024130001AC760000AFB3005059
63345+:10EC5000AC7F000417C0004E000000008FA90050D8
63346+:10EC60001520000B000000003C030801906396F1A2
63347+:10EC7000306A00011140002E8FAB0058306400FE56
63348+:10EC80003C010801A02496F10A000D75000018212E
63349+:10EC90000E000CAC024020210A000F1300000000FF
63350+:10ECA0000A000E200000A0210040F80924040017EB
63351+:10ECB0000A000DCA240300010040F80924040016CC
63352+:10ECC0000A000DCA240300019094004F240DFFFE9A
63353+:10ECD000028D2824A085004F30F900011320000682
63354+:10ECE0003C0480083C030801906396F1307F0010DB
63355+:10ECF00017E00051306800EF34900080240A0001D2
63356+:10ED0000024020210E00164EA60A00129203002592
63357+:10ED100024090001AFA90050346200010240202103
63358+:10ED20000E001658A20200250A000EF93C0D8008BC
63359+:10ED30001160FE83000018218FA5003030AC000464
63360+:10ED40001180FE2C8FBF00840A000DCB240300012C
63361+:10ED500027A500380E000CB6AFA000385440FF4382
63362+:10ED60008EE200048FB40038329001005200FF3F61
63363+:10ED70008EE200048FA3003C8E6E0058006E682364
63364+:10ED800005A3FF39AE6300580A000E948EE200041A
63365+:10ED90000E00164E024020213C038008346800809B
63366+:10EDA000024020210E001658A11E000903C0302188
63367+:10EDB000240400370E0016F2000028210A000F116B
63368+:10EDC0008FA900508FAB00185960FF8D3C0D800853
63369+:10EDD0000E00164E02402021920C00252405000151
63370+:10EDE000AFA5005035820004024020210E001658C5
63371+:10EDF000A20200250A000EF93C0D800812240059D9
63372+:10EE00002A2300151060004D240900162408000C68
63373+:10EE10005628FF2732B000013C0A8008914C001BA5
63374+:10EE20002406FFBD241E000E01865824A14B001BA2
63375+:10EE30000A000EA532B000013C010801A02896F19D
63376+:10EE40000A000EF93C0D80088CB500308EFE0008DB
63377+:10EE50002404001826B6000103C0F809ACB600303F
63378+:10EE60003C030801906396F13077000116E0FF81C2
63379+:10EE7000306A00018FB200300A000D753243000481
63380+:10EE80003C1080009605011A50A0FF2B34C60010DC
63381+:10EE90000A000EC892EE000C8C6200001456FF6D42
63382+:10EEA000000000008C7800048FB9005403388823D8
63383+:10EEB0000621FF638FBF00540A000F0E0000000000
63384+:10EEC0003C010801A02A96F10A000F3030F9000138
63385+:10EED0001633FF028FAF00240A000EB0241E00106C
63386+:10EEE0000E00164E024020213C0B80083568008041
63387+:10EEF00091090025240A0001AFAA0050353300040F
63388+:10EF0000024020210E001658A11300253C050801DF
63389+:10EF100090A596F130A200FD3C010801A02296F1D7
63390+:10EF20000A000E6D004018212411000E53D1FEEA94
63391+:10EF3000241E00100A000EAF241E00165629FEDC07
63392+:10EF400032B000013C0A8008914C001B2406FFBD32
63393+:10EF5000241E001001865824A14B001B0A000EA598
63394+:10EF600032B000010A000EA4241E00123C038000EF
63395+:10EF70008C6201B80440FFFE24040800AC6401B8B0
63396+:10EF800003E000080000000030A5FFFF30C6FFFFCF
63397+:10EF90003C0780008CE201B80440FFFE34EA0180A7
63398+:10EFA000AD440000ACE400203C0480089483004899
63399+:10EFB0003068FFFF11000016AF88000824AB001274
63400+:10EFC000010B482B512000133C04800034EF01005A
63401+:10EFD00095EE00208F890000240D001A31CCFFFF30
63402+:10EFE00031274000A14D000B10E000362583FFFEC5
63403+:10EFF0000103C02B170000348F9900048F88000490
63404+:10F00000A5430014350700010A001003AF87000470
63405+:10F010003C04800024030003348201808F890000B7
63406+:10F020008F870004A043000B3C088000350C018052
63407+:10F03000A585000EA585001A8F85000C30EB800099
63408+:10F04000A5890010AD850028A58600081160000F75
63409+:10F050008F85001435190100972A00163158FFFCDE
63410+:10F06000270F000401E870218DCD400031A6FFFF7D
63411+:10F0700014C000072403BFFF3C02FFFF34487FFF9A
63412+:10F0800000E83824AF8700048F8500142403BFFFF5
63413+:10F090003C04800000E3582434830180A46B0026E4
63414+:10F0A000AC69002C10A0000300054C02A465001000
63415+:10F0B000A46900263C071000AC8701B803E00008F3
63416+:10F0C000000000008F990004240AFFFE032A382460
63417+:10F0D0000A001003AF87000427BDFFE88FA20028B5
63418+:10F0E00030A5FFFF30C6FFFFAFBF0010AF87000C99
63419+:10F0F000AF820014AF8000040E000FDBAF80000071
63420+:10F100008FBF001027BD001803E00008AF80001477
63421+:10F110003C06800034C4007034C701008C8A0000B3
63422+:10F1200090E500128F84000027BDFFF030A300FFA0
63423+:10F13000000318823082400010400037246500032D
63424+:10F140000005C8800326C0218F0E4000246F0004F4
63425+:10F15000000F6880AFAE000001A660218D8B4000DB
63426+:10F16000AFAB000494E900163128FFFC01063821FA
63427+:10F170008CE64000AFA600088FA9000800003021EF
63428+:10F18000000028213C07080024E701000A0010675E
63429+:10F19000240800089059000024A500012CAC000CA4
63430+:10F1A0000079C0210018788001E770218DCD000022
63431+:10F1B0001180000600CD302603A5102114A8FFF50C
63432+:10F1C00000051A005520FFF4905900003C0480000F
63433+:10F1D000348700703C0508008CA531048CE30000E6
63434+:10F1E0002CA2002010400009006A38230005488046
63435+:10F1F0003C0B0800256B3108012B402124AA00019B
63436+:10F20000AD0700003C010800AC2A310400C0102109
63437+:10F2100003E0000827BD0010308220001040000BE2
63438+:10F2200000055880016648218D24400024680004B0
63439+:10F2300000083880AFA4000000E618218C6540006B
63440+:10F24000AFA000080A001057AFA500040000000D91
63441+:10F250000A0010588FA9000827BDFFE03C07800076
63442+:10F2600034E60100AFBF001CAFB20018AFB100140C
63443+:10F27000AFB0001094C5000E8F87000030A4FFFFD0
63444+:10F280002483000430E2400010400010AF830028C7
63445+:10F290003C09002000E940241100000D30EC800002
63446+:10F2A0008F8A0004240BBFFF00EB38243543100085
63447+:10F2B000AF87000030F220001640000B3C1900041C
63448+:10F2C000241FFFBF0A0010B7007F102430EC80001D
63449+:10F2D000158000423C0E002030F220001240FFF862
63450+:10F2E0008F8300043C19000400F9C0241300FFF5CB
63451+:10F2F000241FFFBF34620040AF82000430E20100EF
63452+:10F300001040001130F010008F83002C10600006B8
63453+:10F310003C0F80003C05002000E52024148000C044
63454+:10F320003C0800043C0F800035EE010095CD001E26
63455+:10F3300095CC001C31AAFFFF000C5C00014B482556
63456+:10F34000AF89000C30F010001200000824110001F9
63457+:10F3500030F100201620008B3C18100000F890249B
63458+:10F36000164000823C040C002411000130E801002A
63459+:10F370001500000B3C0900018F85000430A94000F6
63460+:10F38000152000073C0900013C0C1F0100EC58242B
63461+:10F390003C0A1000116A01183C1080003C09000171
63462+:10F3A00000E9302410C000173C0B10003C18080086
63463+:10F3B0008F1800243307000214E0014024030001E9
63464+:10F3C0008FBF001C8FB200188FB100148FB00010D7
63465+:10F3D0000060102103E0000827BD002000EE682433
63466+:10F3E00011A0FFBE30F220008F8F00043C11FFFF00
63467+:10F3F00036307FFF00F0382435E380000A0010A685
63468+:10F40000AF87000000EB102450400065AF8000245F
63469+:10F410008F8C002C3C0D0F0000ED18241580008807
63470+:10F42000AF83001030E8010011000086938F0010B8
63471+:10F430003C0A0200106A00833C1280003650010032
63472+:10F44000920500139789002A3626000230AF00FF8C
63473+:10F4500025EE0004000E19C03C0480008C9801B811
63474+:10F460000700FFFE34880180AD0300003C198008CE
63475+:10F47000AC830020973100483225FFFF10A0015CCB
63476+:10F48000AF8500082523001200A3F82B53E0015993
63477+:10F490008F850004348D010095AC00202402001AF1
63478+:10F4A00030E44000318BFFFFA102000B108001927D
63479+:10F4B0002563FFFE00A3502B154001908F8F0004A1
63480+:10F4C000A50300148F88000435050001AF850004F2
63481+:10F4D0003C08800035190180A729000EA729001AD1
63482+:10F4E0008F89000C30B18000A7270010AF290028B9
63483+:10F4F000A72600081220000E3C04800035020100FF
63484+:10F50000944C0016318BFFFC256400040088182100
63485+:10F510008C7F400033E6FFFF14C000053C048000F0
63486+:10F520003C0AFFFF354D7FFF00AD2824AF85000466
63487+:10F53000240EBFFF00AE402434850180A4A800261D
63488+:10F54000ACA7002C3C071000AC8701B800001821C4
63489+:10F550008FBF001C8FB200188FB100148FB0001045
63490+:10F560000060102103E0000827BD00203C020BFFD3
63491+:10F5700000E41824345FFFFF03E3C82B5320FF7B14
63492+:10F58000241100013C0608008CC6002C24C5000193
63493+:10F590003C010800AC25002C0A0010D42411000501
63494+:10F5A0008F85002410A0002FAF80001090A30000D2
63495+:10F5B000146000792419000310A0002A30E601002D
63496+:10F5C00010C000CC8F860010241F000210DF00C97D
63497+:10F5D0008F8B000C3C0708008CE7003824E4FFFF09
63498+:10F5E00014E0000201641824000018213C0D0800FA
63499+:10F5F00025AD0038006D1021904C00048F85002847
63500+:10F6000025830004000321C030A5FFFF3626000239
63501+:10F610000E000FDB000000000A00114D0000182151
63502+:10F6200000E8302414C0FF403C0F80000E00103D65
63503+:10F63000000000008F8700000A0010CAAF82000C93
63504+:10F64000938F00103C18080127189640000F90C0B7
63505+:10F6500002588021AF9000248F85002414A0FFD38E
63506+:10F66000AF8F00103C0480008C86400030C5010044
63507+:10F6700010A000BC322300043C0C08008D8C002438
63508+:10F6800024120004106000C23190000D3C04800080
63509+:10F690008C8D40003402FFFF11A201003231FFFBCC
63510+:10F6A0008C884000310A01005540000124110010EF
63511+:10F6B00030EE080011C000BE2419FFFB8F9800280F
63512+:10F6C0002F0F03EF51E000010219802430E90100FF
63513+:10F6D00011200014320800018F87002C14E000FB79
63514+:10F6E0008F8C000C3C05800034AB0100917F00132F
63515+:10F6F00033E300FF246A00042403FFFE0203802496
63516+:10F70000000A21C012000002023230253226FFFF1B
63517+:10F710000E000FDB9785002A1200FF290000182138
63518+:10F72000320800011100000D32180004240E0001FF
63519+:10F73000120E0002023230253226FFFF9785002A82
63520+:10F740000E000FDB00002021240FFFFE020F80249B
63521+:10F750001200FF1B00001821321800045300FF188C
63522+:10F760002403000102323025241200045612000145
63523+:10F770003226FFFF9785002A0E000FDB24040100CC
63524+:10F780002419FFFB021988241220FF0D0000182104
63525+:10F790000A0010E9240300011079009C00003021C8
63526+:10F7A00090AD00012402000211A200BE30EA004028
63527+:10F7B00090B90001241800011338007F30E900409F
63528+:10F7C0008CA600049785002A00C020210E000FDBC4
63529+:10F7D0003626000200004021010018218FBF001CC6
63530+:10F7E0008FB200188FB100148FB00010006010218C
63531+:10F7F00003E0000827BD0020360F010095EE000C45
63532+:10F8000031CD020015A0FEE63C0900013C1880083D
63533+:10F81000971200489789002A362600023248FFFFD7
63534+:10F82000AF8800083C0380008C7101B80620FFFE01
63535+:10F83000346A0180AD4000001100008E3C0F800052
63536+:10F84000253F0012011FC82B1320008B240E00033C
63537+:10F85000346C0100958B00202402001A30E4400033
63538+:10F860003163FFFFA142000B108000A72463FFFE5D
63539+:10F870000103682B15A000A52408FFFE34A5000194
63540+:10F88000A5430014AF8500043C0480002412BFFF90
63541+:10F8900000B2802434850180A4A9000EA4A9001A16
63542+:10F8A000A4A60008A4B00026A4A700103C071000DE
63543+:10F8B000AC8701B80A00114D000018213C038000FC
63544+:10F8C00034640100949F000E3C1908008F3900D861
63545+:10F8D0002404008033E5FFFF273100013C010800CC
63546+:10F8E000AC3100D80E000FDB240600030A00114DD6
63547+:10F8F00000001821240A000210CA00598F85002830
63548+:10F900003C0308008C6300D0240E0001106E005EE2
63549+:10F910002CCF000C24D2FFFC2E5000041600002136
63550+:10F9200000002021241800021078001B2CD9000CA4
63551+:10F9300024DFFFF82FE900041520FF330000202109
63552+:10F9400030EB020051600004000621C054C00022C8
63553+:10F9500030A5FFFF000621C030A5FFFF0A00117D82
63554+:10F96000362600023C0908008D29002431300001B0
63555+:10F970005200FEF7000018219785002A3626000263
63556+:10F980000E000FDB000020210A00114D000018219D
63557+:10F990000A00119C241200021320FFE624DFFFF866
63558+:10F9A0000000202130A5FFFF0A00117D362600024D
63559+:10F9B0000A0011AC021980245120FF828CA6000499
63560+:10F9C0003C05080190A5964110A0FF7E2408000187
63561+:10F9D0000A0011F0010018210E000FDB3226000191
63562+:10F9E0008F8600108F8500280A00124F000621C064
63563+:10F9F0008F8500043C18800024120003371001801A
63564+:10FA0000A212000B0A00112E3C08800090A30001F6
63565+:10FA1000241100011071FF70240800012409000264
63566+:10FA20005069000430E60040240800010A0011F08B
63567+:10FA30000100182150C0FFFD240800013C0C80008B
63568+:10FA4000358B01009563001094A40002307FFFFF06
63569+:10FA5000509FFF62010018210A001284240800014F
63570+:10FA60002CA803EF1100FE56240300010A001239EE
63571+:10FA700000000000240E000335EA0180A14E000BB7
63572+:10FA80000A00121C3C04800011E0FFA2000621C005
63573+:10FA900030A5FFFF0A00117D362600020A0011A5DD
63574+:10FAA000241100201140FFC63C1280003650010096
63575+:10FAB000960F001094AE000231E80FFF15C8FFC08A
63576+:10FAC000000000000A0011E690B900013C060800A1
63577+:10FAD0008CC6003824C4FFFF14C00002018418241F
63578+:10FAE000000018213C0D080025AD0038006D1021E4
63579+:10FAF0000A0011B6904300048F8F0004240EFFFE0D
63580+:10FB00000A00112C01EE28242408FFFE0A00121A14
63581+:10FB100000A8282427BDFFC8AFB00010AFBF003435
63582+:10FB20003C10600CAFBE0030AFB7002CAFB6002861
63583+:10FB3000AFB50024AFB40020AFB3001CAFB20018C3
63584+:10FB4000AFB100148E0E5000240FFF7F3C068000E2
63585+:10FB500001CF682435AC380C240B0003AE0C5000E8
63586+:10FB6000ACCB00083C010800AC2000200E001819A6
63587+:10FB7000000000003C0A0010354980513C06601628
63588+:10FB8000AE09537C8CC700003C0860148D0500A0B2
63589+:10FB90003C03FFFF00E320243C02535300051FC237
63590+:10FBA0001482000634C57C000003A08002869821E0
63591+:10FBB0008E7200043C116000025128218CBF007C31
63592+:10FBC0008CA200783C1E600037C420203C05080150
63593+:10FBD00024A59288AF820018AF9F001C0E0016DD8E
63594+:10FBE0002406000A3C190001273996403C01080010
63595+:10FBF000AC3931DC0E0020DDAF8000148FD708084F
63596+:10FC00002418FFF03C15570902F8B02412D502F56C
63597+:10FC100024040001AF80002C3C1480003697018042
63598+:10FC20003C1E080127DE9644369301008E900000AA
63599+:10FC30003205000310A0FFFD3207000110E000882C
63600+:10FC4000320600028E7100283C048000AE91002034
63601+:10FC50008E6500048E66000000A0382100C040219F
63602+:10FC60008C8301B80460FFFE3C0B0010240A0800DE
63603+:10FC700000AB4824AC8A01B8552000E0240BBFFF3C
63604+:10FC80009675000E3C1208008E52002030AC4000E9
63605+:10FC900032AFFFFF264E000125ED00043C010800B5
63606+:10FCA000AC2E0020118000E8AF8D00283C18002009
63607+:10FCB00000B8B02412C000E530B980002408BFFFAE
63608+:10FCC00000A8382434C81000AF87000030E62000B8
63609+:10FCD00010C000E92409FFBF3C03000400E328240E
63610+:10FCE00010A00002010910243502004030EA010092
63611+:10FCF00011400010AF8200048F8B002C11600007B0
63612+:10FD00003C0D002000ED6024118000043C0F000435
63613+:10FD100000EF702411C00239000000009668001E38
63614+:10FD20009678001C3115FFFF0018B40002B690252C
63615+:10FD3000AF92000C30F910001320001324150001BD
63616+:10FD400030FF002017E0000A3C04100000E41024FB
63617+:10FD50001040000D3C0A0C003C090BFF00EA18247F
63618+:10FD60003525FFFF00A3302B10C0000830ED010047
63619+:10FD70003C0C08008D8C002C24150005258B0001FF
63620+:10FD80003C010800AC2B002C30ED010015A0000B4D
63621+:10FD90003C0500018F85000430AE400055C00007CF
63622+:10FDA0003C0500013C161F0100F690243C0F10009A
63623+:10FDB000124F01CE000000003C05000100E5302498
63624+:10FDC00010C000AF3C0C10003C1F08008FFF002447
63625+:10FDD00033E90002152000712403000100601021A6
63626+:10FDE000104000083C0680003C08800035180100E7
63627+:10FDF0008F0F00243C056020ACAF00140000000011
63628+:10FE00003C0680003C194000ACD9013800000000DD
63629+:10FE10005220001332060002262B0140262C0080BF
63630+:10FE2000240EFF80016E2024018E6824000D1940ED
63631+:10FE3000318A007F0004A9403172007F3C16200007
63632+:10FE400036C20002006A482502B2382500E2882541
63633+:10FE50000122F825ACDF0830ACD1083032060002B0
63634+:10FE600010C0FF723C188000370501408CA80000CC
63635+:10FE700024100040AF08002090AF000831E300706C
63636+:10FE8000107000D428790041532000082405006038
63637+:10FE9000241100201071000E3C0A40003C09800033
63638+:10FEA000AD2A01780A001304000000001465FFFB6E
63639+:10FEB0003C0A40000E001FFA000000003C0A40000F
63640+:10FEC0003C098000AD2A01780A00130400000000FC
63641+:10FED00090A90009241F00048CA70000312800FF0E
63642+:10FEE000111F01B22503FFFA2C7200061240001404
63643+:10FEF0003C0680008CA9000494A4000A310500FF90
63644+:10FF000000095E022D6A00083086FFFF15400002DE
63645+:10FF10002567000424070003240C000910AC01FA33
63646+:10FF200028AD000A11A001DE2410000A240E0008EA
63647+:10FF300010AE0028000731C000C038213C06800008
63648+:10FF40008CD501B806A0FFFE34D20180AE47000078
63649+:10FF500034CB0140916E0008240300023C0A4000AB
63650+:10FF600031C400FF00046A0001A86025A64C000807
63651+:10FF7000A243000B9562000A3C0810003C09800077
63652+:10FF8000A64200108D670004AE470024ACC801B83B
63653+:10FF9000AD2A01780A001304000000003C0A80002A
63654+:10FFA000354401009483000E3C0208008C4200D8C6
63655+:10FFB000240400803065FFFF245500013C01080047
63656+:10FFC000AC3500D80E000FDB240600030A001370C6
63657+:10FFD000000018210009320230D900FF2418000166
63658+:10FFE0001738FFD5000731C08F910020262200016D
63659+:10FFF000AF8200200A0013C800C0382100CB2024A3
63660+:020000021000EC
63661+:10000000AF85000010800008AF860004240D87FF34
63662+:1000100000CD6024158000083C0E006000AE302446
63663+:1000200010C00005000000000E000D42000000009E
63664+:100030000A001371000000000E0016050000000009
63665+:100040000A0013710000000030B980005320FF1F28
63666+:10005000AF8500003C02002000A2F82453E0FF1B03
63667+:10006000AF8500003C07FFFF34E47FFF00A4382485
63668+:100070000A00132B34C880000A001334010910242D
63669+:1000800000EC58245160005AAF8000248F8D002C62
63670+:100090003C0E0F0000EE182415A00075AF83001071
63671+:1000A00030EF010011E00073939800103C12020041
63672+:1000B000107200703C06800034D9010093280013B0
63673+:1000C0009789002A36A60002311800FF271600047F
63674+:1000D000001619C03C0480008C8501B804A0FFFE06
63675+:1000E00034880180AD0300003C158008AC830020FB
63676+:1000F00096BF004833E5FFFF10A001BCAF850008A4
63677+:100100002523001200A3102B504001B98F85000455
63678+:10011000348D010095AC0020240B001A30E440001F
63679+:10012000318AFFFFA10B000B108001BA2543FFFEAF
63680+:1001300000A3702B15C001B88F9600048F8F0004A8
63681+:10014000A503001435E50001AF8500043C088000DC
63682+:1001500035150180A6A9000EA6A9001A8F89000CEA
63683+:1001600030BF8000A6A70010AEA90028A6A60008F0
63684+:1001700013E0000F3C0F8000350C0100958B00163A
63685+:10018000316AFFFC25440004008818218C6240007D
63686+:100190003046FFFF14C000072416BFFF3C0EFFFFD0
63687+:1001A00035CD7FFF00AD2824AF8500043C0F8000D3
63688+:1001B0002416BFFF00B6902435E50180A4B20026C6
63689+:1001C000ACA7002C3C071000ADE701B80A00137083
63690+:1001D000000018210E00165D000000003C0A4000DF
63691+:1001E0003C098000AD2A01780A00130400000000D9
63692+:1001F0008F85002410A00027AF80001090A300007E
63693+:10020000106000742409000310690101000030210E
63694+:1002100090AE0001240D000211CD014230EF0040EC
63695+:1002200090A90001241F0001113F000930E20040A5
63696+:100230008CA600049785002A00C020210E000FDB49
63697+:1002400036A60002000040210A00137001001821A8
63698+:100250005040FFF88CA600043C07080190E7964147
63699+:1002600010E0FFF4240800010A00137001001821B7
63700+:10027000939800103C1F080127FF96400018C8C043
63701+:10028000033F4021AF8800248F85002414A0FFDBAA
63702+:10029000AF9800103C0480008C86400030C50100FF
63703+:1002A00010A0008732AB00043C0C08008D8C0024A9
63704+:1002B00024160004156000033192000D241600027C
63705+:1002C0003C0480008C8E4000340DFFFF11CD0113E3
63706+:1002D00032B5FFFB8C984000330F010055E0000160
63707+:1002E0002415001030E80800110000382409FFFB35
63708+:1002F0008F9F00282FF903EF53200001024990241B
63709+:1003000030E2010010400014325F00018F87002CA2
63710+:1003100014E0010E8F8C000C3C0480003486010038
63711+:1003200090C5001330AA00FF25430004000321C03C
63712+:100330002419FFFE025990241240000202B6302513
63713+:1003400032A6FFFF0E000FDB9785002A1240FEA3A6
63714+:1003500000001821325F000113E0000D3247000455
63715+:10036000240900011249000202B6302532A6FFFF1F
63716+:100370009785002A0E000FDB000020212402FFFEDB
63717+:10038000024290241240FE950000182132470004DA
63718+:1003900050E0FE922403000102B63025241600042A
63719+:1003A0005656000132A6FFFF9785002A0E000FDB8C
63720+:1003B000240401002403FFFB0243A82412A0FE87AB
63721+:1003C000000018210A001370240300010A0014B968
63722+:1003D0000249902410A0FFAF30E5010010A00017E3
63723+:1003E0008F8600102403000210C300148F84000CB9
63724+:1003F0003C0608008CC6003824CAFFFF14C0000267
63725+:10040000008A1024000010213C0E080025CE003880
63726+:10041000004E682191AC00048F850028258B0004D4
63727+:10042000000B21C030A5FFFF36A600020E000FDB37
63728+:10043000000000000A00137000001821240F0002C1
63729+:1004400010CF0088241600013C0308008C6300D004
63730+:100450001076008D8F85002824D9FFFC2F280004FA
63731+:100460001500006300002021241F0002107F005DA2
63732+:100470002CC9000C24C3FFF82C6200041440FFE9CF
63733+:100480000000202130EA020051400004000621C093
63734+:1004900054C0000530A5FFFF000621C030A5FFFFB6
63735+:1004A0000A00150436A600020E000FDB32A600017A
63736+:1004B0008F8600108F8500280A001520000621C0B5
63737+:1004C0003C0A08008D4A0024315200015240FE438C
63738+:1004D000000018219785002A36A600020E000FDBC7
63739+:1004E000000020210A001370000018219668000CFB
63740+:1004F000311802005700FE313C0500013C1F800806
63741+:1005000097F900489789002A36A600023328FFFF92
63742+:10051000AF8800083C0380008C7501B806A0FFFE80
63743+:100520003C04800034820180AC400000110000B621
63744+:1005300024180003252A0012010A182B106000B2AB
63745+:1005400000000000966F00203C0E8000240D001A71
63746+:1005500031ECFFFF35CA018030EB4000A14D000BAC
63747+:10056000116000B02583FFFE0103902B164000AE02
63748+:100570002416FFFE34A50001A5430014AF85000436
63749+:100580002419BFFF00B94024A6E9000EA6E9001A0D
63750+:10059000A6E60008A6E80026A6E700103C07100023
63751+:1005A000AE8701B80A001370000018213C048000D7
63752+:1005B0008C8201B80440FFFE349601802415001C93
63753+:1005C000AEC70000A2D5000B3C071000AC8701B8F5
63754+:1005D0003C0A40003C098000AD2A01780A0013045F
63755+:1005E000000000005120FFA424C3FFF800002021D8
63756+:1005F00030A5FFFF0A00150436A600020E00103DCC
63757+:10060000000000008F8700000A001346AF82000C34
63758+:1006100090A30001241500011075FF0B24080001B0
63759+:10062000240600021066000430E2004024080001A5
63760+:100630000A001370010018215040FFFD240800013A
63761+:100640003C0C8000358B0100956A001094A40002D8
63762+:100650003143FFFF5083FDE1010018210A00158599
63763+:10066000240800018F8500282CB203EF1240FDDB27
63764+:10067000240300013C0308008C6300D02416000111
63765+:100680001476FF7624D9FFFC2CD8000C1300FF72DF
63766+:10069000000621C030A5FFFF0A00150436A600029F
63767+:1006A00010B00037240F000B14AFFE23000731C039
63768+:1006B000312600FF00065600000A4E0305220047BF
63769+:1006C00030C6007F0006F8C03C16080126D69640CA
63770+:1006D00003F68021A2000001A20000003C0F600090
63771+:1006E0008DF918202405000100C588040011302769
63772+:1006F0000326C024000731C000C03821ADF81820FF
63773+:100700000A0013C8A60000028F850020000731C030
63774+:1007100024A2FFFF0A0013F6AF8200200A0014B2E1
63775+:100720002415002011E0FECC3C1980003728010080
63776+:100730009518001094B6000233120FFF16D2FEC6B1
63777+:10074000000000000A00148290A900013C0B080080
63778+:100750008D6B0038256DFFFF15600002018D1024A0
63779+:10076000000010213C080800250800380048C0217E
63780+:10077000930F000425EE00040A0014C5000E21C0EA
63781+:1007800000065202241F00FF115FFDEB000731C07D
63782+:10079000000A20C03C0E080125CE9640008EA821FC
63783+:1007A000009E602100095C02240D00013C076000EE
63784+:1007B000A2AD0000AD860000A2AB00018CF21820B3
63785+:1007C00024030001014310040242B025ACF61820B6
63786+:1007D00000C038210A0013C8A6A900020A0015AA01
63787+:1007E000AF8000200A0012FFAF84002C8F85000428
63788+:1007F0003C1980002408000337380180A308000B4F
63789+:100800000A00144D3C088000A2F8000B0A00155A9B
63790+:100810002419BFFF8F9600042412FFFE0A00144B18
63791+:1008200002D228242416FFFE0A00155800B62824F8
63792+:100830003C038000346401008C85000030A2003E3F
63793+:100840001440000800000000AC6000488C870000E5
63794+:1008500030E607C010C0000500000000AC60004C8E
63795+:10086000AC60005003E0000824020001AC600054BA
63796+:10087000AC6000408C880000310438001080FFF923
63797+:10088000000000002402000103E00008AC60004406
63798+:100890003C0380008C6201B80440FFFE3467018095
63799+:1008A000ACE4000024080001ACE00004A4E500086A
63800+:1008B00024050002A0E8000A34640140A0E5000B12
63801+:1008C0009483000A14C00008A4E30010ACE00024E4
63802+:1008D0003C07800034E901803C041000AD20002872
63803+:1008E00003E00008ACE401B88C8600043C0410006E
63804+:1008F000ACE600243C07800034E90180AD200028EC
63805+:1009000003E00008ACE401B83C0680008CC201B8EA
63806+:100910000440FFFE34C7018024090002ACE400005B
63807+:10092000ACE40004A4E50008A0E9000A34C50140D5
63808+:10093000A0E9000B94A8000A3C041000A4E80010F1
63809+:10094000ACE000248CA30004ACE3002803E0000822
63810+:10095000ACC401B83C039000346200010082202541
63811+:100960003C038000AC6400208C65002004A0FFFEE6
63812+:100970000000000003E00008000000003C028000CE
63813+:10098000344300010083202503E00008AC4400202C
63814+:1009900027BDFFE03C098000AFBF0018AFB10014D5
63815+:1009A000AFB00010352801408D10000091040009FF
63816+:1009B0009107000891050008308400FF30E600FF31
63817+:1009C00000061A002C820081008330251040002A86
63818+:1009D00030A50080000460803C0D080125AD92B078
63819+:1009E000018D58218D6A00000140000800000000C0
63820+:1009F0003C038000346201409445000A14A0001EAC
63821+:100A00008F91FCC09227000530E6000414C0001A44
63822+:100A1000000000000E00164E02002021922A000560
63823+:100A200002002021354900040E001658A2290005B5
63824+:100A30009228000531040004148000020000000028
63825+:100A40000000000D922D0000240B002031AC00FFAF
63826+:100A5000158B00093C0580008CAE01B805C0FFFE77
63827+:100A600034B10180AE3000003C0F100024100005AE
63828+:100A7000A230000BACAF01B80000000D8FBF001812
63829+:100A80008FB100148FB0001003E0000827BD0020D4
63830+:100A90000200202100C028218FBF00188FB1001450
63831+:100AA0008FB00010240600010A00161D27BD00208B
63832+:100AB0000000000D0200202100C028218FBF001877
63833+:100AC0008FB100148FB00010000030210A00161DF5
63834+:100AD00027BD002014A0FFE8000000000200202134
63835+:100AE0008FBF00188FB100148FB0001000C02821F4
63836+:100AF0000A00163B27BD00203C0780008CEE01B8A1
63837+:100B000005C0FFFE34F00180241F0002A21F000B6D
63838+:100B100034F80140A60600089719000A3C0F10009F
63839+:100B2000A61900108F110004A6110012ACEF01B835
63840+:100B30000A0016998FBF001827BDFFE8AFBF00104D
63841+:100B40000E000FD4000000003C0280008FBF001098
63842+:100B500000002021AC4001800A00108F27BD001842
63843+:100B60003084FFFF30A5FFFF108000070000182130
63844+:100B7000308200011040000200042042006518216C
63845+:100B80001480FFFB0005284003E0000800601021EE
63846+:100B900010C00007000000008CA2000024C6FFFF68
63847+:100BA00024A50004AC82000014C0FFFB24840004D0
63848+:100BB00003E000080000000010A0000824A3FFFFCD
63849+:100BC000AC86000000000000000000002402FFFFCF
63850+:100BD0002463FFFF1462FFFA2484000403E000088A
63851+:100BE000000000003C03800027BDFFF83462018054
63852+:100BF000AFA20000308C00FF30AD00FF30CE00FF10
63853+:100C00003C0B80008D6401B80480FFFE00000000F2
63854+:100C10008FA900008D6801288FAA00008FA700000F
63855+:100C20008FA400002405000124020002A085000A10
63856+:100C30008FA30000359940003C051000A062000B16
63857+:100C40008FB800008FAC00008FA600008FAF0000AF
63858+:100C500027BD0008AD280000AD400004AD80002491
63859+:100C6000ACC00028A4F90008A70D0010A5EE0012E2
63860+:100C700003E00008AD6501B83C06800827BDFFE829
63861+:100C800034C50080AFBF001090A7000924020012F5
63862+:100C900030E300FF1062000B008030218CA8005070
63863+:100CA00000882023048000088FBF00108CAA003425
63864+:100CB000240400390000282100CA4823052000052B
63865+:100CC000240600128FBF00102402000103E0000878
63866+:100CD00027BD00180E0016F2000000008FBF0010A4
63867+:100CE0002402000103E0000827BD001827BDFFC84B
63868+:100CF000AFB20030AFB00028AFBF0034AFB1002CAE
63869+:100D000000A0802190A5000D30A6001010C000109A
63870+:100D1000008090213C0280088C4400048E0300086F
63871+:100D20001064000C30A7000530A6000510C0009329
63872+:100D3000240400018FBF00348FB200308FB1002C2B
63873+:100D40008FB000280080102103E0000827BD003884
63874+:100D500030A7000510E0000F30AB001210C00006F5
63875+:100D6000240400013C0980088E0800088D25000439
63876+:100D70005105009C240400388FBF00348FB200302E
63877+:100D80008FB1002C8FB000280080102103E00008F4
63878+:100D900027BD0038240A0012156AFFE6240400016A
63879+:100DA0000200202127A500100E000CB6AFA00010F5
63880+:100DB0001440007C3C19800837240080909800087B
63881+:100DC000331100081220000A8FA7001030FF010025
63882+:100DD00013E000A48FA300148C8600580066102333
63883+:100DE000044000043C0A8008AC8300588FA7001020
63884+:100DF0003C0A800835480080910900083124000829
63885+:100E00001480000224080003000040213C1F8008D9
63886+:100E100093F1001193F9001237E600808CCC005456
63887+:100E2000333800FF03087821322D00FF000F708057
63888+:100E300001AE282100AC582B1160006F00000000AB
63889+:100E400094CA005C8CC900543144FFFF0125102373
63890+:100E50000082182B14600068000000008CCB005446
63891+:100E60000165182330EC00041180006C000830800C
63892+:100E70008FA8001C0068102B1040006230ED0004A9
63893+:100E8000006610232C46008010C00002004088211C
63894+:100E9000241100800E00164E024020213C0D8008D7
63895+:100EA00035A6008024070001ACC7000C90C80008DC
63896+:100EB0000011484035A70100310C007FA0CC00088C
63897+:100EC0008E05000424AB0001ACCB0030A4D1005C43
63898+:100ED0008CCA003C9602000E01422021ACC40020C6
63899+:100EE0008CC3003C0069F821ACDF001C8E190004A3
63900+:100EF000ACF900008E180008ACF800048FB10010A7
63901+:100F0000322F000855E0004793A60020A0C0004EF5
63902+:100F100090D8004E2411FFDFA0F8000890CF000801
63903+:100F200001F17024A0CE00088E0500083C0B80085B
63904+:100F300035690080AD2500388D6A00148D2200309F
63905+:100F40002419005001422021AD24003491230000D7
63906+:100F5000307F00FF13F90036264F01000E001658AF
63907+:100F60000240202124040038000028210E0016F23F
63908+:100F70002406000A0A001757240400010E000D2859
63909+:100F8000000020218FBF00348FB200308FB1002CC1
63910+:100F90008FB00028004020210080102103E00008CD
63911+:100FA00027BD00388E0E00083C0F800835F0008009
63912+:100FB000AE0E005402402021AE0000300E00164E4E
63913+:100FC00000000000920D00250240202135AC0020D9
63914+:100FD0000E001658A20C00250E000CAC0240202179
63915+:100FE000240400382405008D0E0016F22406001299
63916+:100FF0000A0017572404000194C5005C0A001792E8
63917+:1010000030A3FFFF2407021811A0FF9E00E6102363
63918+:101010008FAE001C0A00179A01C610230A0017970A
63919+:101020002C620218A0E600080A0017C48E0500080A
63920+:101030002406FF8001E6C0243C118000AE38002861
63921+:101040008E0D000831E7007F3C0E800C00EE602121
63922+:10105000AD8D00E08E080008AF8C00380A0017D074
63923+:10106000AD8800E4AC800058908500082403FFF7A9
63924+:1010700000A33824A08700080A0017758FA7001066
63925+:101080003C05080024A560A83C04080024846FF4F3
63926+:101090003C020800244260B0240300063C01080121
63927+:1010A000AC2596C03C010801AC2496C43C01080163
63928+:1010B000AC2296C83C010801A02396CC03E00008AE
63929+:1010C0000000000003E00008240200013C02800050
63930+:1010D000308800FF344701803C0680008CC301B893
63931+:1010E0000460FFFE000000008CC501282418FF806A
63932+:1010F0003C0D800A24AF010001F8702431EC007F20
63933+:10110000ACCE0024018D2021ACE50000948B00EAD8
63934+:101110003509600024080002316AFFFFACEA0004D0
63935+:1011200024020001A4E90008A0E8000BACE00024C0
63936+:101130003C071000ACC701B8AF84003803E00008DA
63937+:10114000AF85006C938800488F8900608F820038DB
63938+:1011500030C600FF0109382330E900FF01221821C1
63939+:1011600030A500FF2468008810C000020124382147
63940+:101170000080382130E400031480000330AA00030B
63941+:101180001140000D312B000310A0000900001021B8
63942+:1011900090ED0000244E000131C200FF0045602B9D
63943+:1011A000A10D000024E700011580FFF925080001CA
63944+:1011B00003E00008000000001560FFF300000000DD
63945+:1011C00010A0FFFB000010218CF80000245900043F
63946+:1011D000332200FF0045782BAD18000024E70004FF
63947+:1011E00015E0FFF92508000403E0000800000000F6
63948+:1011F00093850048938800588F8700600004320070
63949+:101200003103007F00E5102B30C47F001040000F39
63950+:10121000006428258F8400383C0980008C8A00EC0B
63951+:10122000AD2A00A43C03800000A35825AC6B00A0AD
63952+:101230008C6C00A00580FFFE000000008C6D00ACEF
63953+:10124000AC8D00EC03E000088C6200A80A00188254
63954+:101250008F840038938800593C0280000080502120
63955+:10126000310300FEA383005930ABFFFF30CC00FFF9
63956+:1012700030E7FFFF344801803C0980008D2401B82D
63957+:101280000480FFFE8F8D006C24180016AD0D000049
63958+:101290008D2201248F8D0038AD0200048D5900206D
63959+:1012A000A5070008240201C4A119000AA118000B17
63960+:1012B000952F01208D4E00088D4700049783005C18
63961+:1012C0008D59002401CF302100C7282100A32023FD
63962+:1012D0002418FFFFA504000CA50B000EA5020010AA
63963+:1012E000A50C0012AD190018AD18002495AF00E848
63964+:1012F0003C0B10002407FFF731EEFFFFAD0E002876
63965+:101300008DAC0084AD0C002CAD2B01B88D460020B7
63966+:1013100000C7282403E00008AD4500208F8800386E
63967+:101320000080582130E7FFFF910900D63C02800081
63968+:1013300030A5FFFF312400FF00041A00006750258C
63969+:1013400030C600FF344701803C0980008D2C01B875
63970+:101350000580FFFE8F82006C240F0017ACE20000B6
63971+:101360008D390124ACF900048D780020A4EA00082E
63972+:10137000241901C4A0F8000AA0EF000B9523012056
63973+:101380008D6E00088D6D00049784005C01C35021B0
63974+:10139000014D602101841023A4E2000CA4E5000E9D
63975+:1013A000A4F90010A4E60012ACE000148D7800242B
63976+:1013B000240DFFFFACF800188D0F007CACEF001C73
63977+:1013C0008D0E00783C0F1000ACEE0020ACED002438
63978+:1013D000950A00BE240DFFF73146FFFFACE600285A
63979+:1013E000950C00809504008231837FFF0003CA00C2
63980+:1013F0003082FFFF0322C021ACF8002CAD2F01B8D2
63981+:10140000950E00828D6A002000AE3021014D282407
63982+:10141000A506008203E00008AD6500203C028000C4
63983+:10142000344501803C0480008C8301B80460FFFED9
63984+:101430008F8A0044240600199549001C3128FFFFBB
63985+:10144000000839C0ACA70000A0A6000B3C051000A6
63986+:1014500003E00008AC8501B88F87004C0080402174
63987+:1014600030C400FF3C0680008CC201B80440FFFE7F
63988+:101470008F89006C9383006834996000ACA90000E8
63989+:10148000A0A300058CE20010240F00022403FFF744
63990+:10149000A4A20006A4B900088D180020A0B8000A74
63991+:1014A000A0AF000B8CEE0000ACAE00108CED000481
63992+:1014B000ACAD00148CEC001CACAC00248CEB002018
63993+:1014C000ACAB00288CEA002C3C071000ACAA002C26
63994+:1014D0008D090024ACA90018ACC701B88D05002007
63995+:1014E00000A3202403E00008AD0400208F8600380C
63996+:1014F00027BDFFE0AFB10014AFBF0018AFB00010C0
63997+:1015000090C300D430A500FF3062002010400008D6
63998+:10151000008088218CCB00D02409FFDF256A0001E0
63999+:10152000ACCA00D090C800D401093824A0C700D4A8
64000+:1015300014A000403C0C80008F840038908700D4B9
64001+:101540002418FFBF2406FFEF30E3007FA08300D400
64002+:10155000979F005C8F8200608F8D003803E2C82364
64003+:10156000A799005CA5A000BC91AF00D401F870243D
64004+:10157000A1AE00D48F8C0038A18000D78F8A0038AC
64005+:10158000A5400082AD4000EC914500D400A658244F
64006+:10159000A14B00D48F9000348F8400609786005C4C
64007+:1015A0000204282110C0000FAF850034A38000582A
64008+:1015B0003C0780008E2C000894ED01208E2B000447
64009+:1015C000018D5021014B8021020620233086FFFF30
64010+:1015D00030C8000F3909000131310001162000091F
64011+:1015E000A3880058938600488FBF00188FB100145D
64012+:1015F0008FB0001027BD0020AF85006403E0000815
64013+:10160000AF86006000C870238FBF00189386004823
64014+:101610008FB100148FB0001034EF0C00010F28219F
64015+:1016200027BD0020ACEE0084AF85006403E0000815
64016+:10163000AF86006035900180020028210E00190F4E
64017+:10164000240600828F840038908600D430C5004084
64018+:1016500050A0FFBAA38000688F85004C3C06800034
64019+:101660008CCD01B805A0FFFE8F89006C2408608234
64020+:1016700024070002AE090000A6080008A207000B1C
64021+:101680008CA300083C0E1000AE0300108CA2000CCE
64022+:10169000AE0200148CBF0014AE1F00188CB90018E5
64023+:1016A000AE1900248CB80024AE1800288CAF002896
64024+:1016B000AE0F002CACCE01B80A001948A380006818
64025+:1016C0008F8A003827BDFFE0AFB10014AFB0001023
64026+:1016D0008F880060AFBF00189389003C954200BC22
64027+:1016E00030D100FF0109182B0080802130AC00FFB1
64028+:1016F0003047FFFF0000582114600003310600FF4F
64029+:1017000001203021010958239783005C0068202BB9
64030+:101710001480002700000000106800562419000102
64031+:101720001199006334E708803165FFFF0E0018C08F
64032+:10173000020020218F83006C3C07800034E601808A
64033+:101740003C0580008CAB01B80560FFFE240A001840
64034+:101750008F840038ACC30000A0CA000B948900BE7F
64035+:101760003C081000A4C90010ACC00030ACA801B8FF
64036+:101770009482008024430001A4830080949F008011
64037+:101780003C0608008CC6318833EC7FFF1186005E72
64038+:101790000000000002002021022028218FBF001835
64039+:1017A0008FB100148FB000100A00193427BD00203B
64040+:1017B000914400D42403FF8000838825A15100D4E4
64041+:1017C0009784005C3088FFFF51000023938C003C1D
64042+:1017D0008F8500382402EFFF008B782394AE00BC85
64043+:1017E0000168502B31E900FF01C26824A4AD00BCA0
64044+:1017F00051400039010058213C1F800037E60100AC
64045+:101800008CD800043C190001031940245500000144
64046+:1018100034E740008E0A00202403FFFB241100015E
64047+:1018200001432024AE0400201191002D34E78000F4
64048+:1018300002002021012030210E0018C03165FFFF79
64049+:101840009787005C8F890060A780005C0127802358
64050+:10185000AF900060938C003C8F8B00388FBF0018D6
64051+:101860008FB100148FB0001027BD002003E00008E6
64052+:10187000A16C00D73C0D800035AA01008D48000402
64053+:101880003C0900010109282454A0000134E740006C
64054+:101890008E0F00202418FFFB34E7800001F870242D
64055+:1018A00024190001AE0E00201599FF9F34E708802F
64056+:1018B000020020210E00188E3165FFFF020020215A
64057+:1018C000022028218FBF00188FB100148FB00010A4
64058+:1018D0000A00193427BD00200A0019F7000048212A
64059+:1018E00002002021012030210E00188E3165FFFFFB
64060+:1018F0009787005C8F890060A780005C01278023A8
64061+:101900000A001A0EAF900060948C0080241F8000A3
64062+:10191000019F3024A4860080908B0080908F0080EF
64063+:10192000316700FF0007C9C20019C027001871C045
64064+:1019300031ED007F01AE2825A08500800A0019DF67
64065+:1019400002002021938500682403000127BDFFE8E1
64066+:1019500000A330042CA20020AFB00010AFBF0014D1
64067+:1019600000C01821104000132410FFFE3C0708009F
64068+:101970008CE7319000E610243C088000350501809A
64069+:1019800014400005240600848F890038240A0004CE
64070+:101990002410FFFFA12A00FC0E00190F0000000018
64071+:1019A000020010218FBF00148FB0001003E0000868
64072+:1019B00027BD00183C0608008CC631940A001A574F
64073+:1019C00000C310248F87004427BDFFE0AFB200188A
64074+:1019D000AFB10014AFB00010AFBF001C30D000FF9B
64075+:1019E00090E6000D00A088210080902130C5007F86
64076+:1019F000A0E5000D8F8500388E2300188CA200D042
64077+:101A00001062002E240A000E0E001A4AA38A0068F3
64078+:101A10002409FFFF104900222404FFFF5200002088
64079+:101A2000000020218E2600003C0C001000CC582421
64080+:101A3000156000393C0E000800CE682455A0003F18
64081+:101A4000024020213C18000200D880241200001F10
64082+:101A50003C0A00048F8700448CE200148CE30010E1
64083+:101A60008CE500140043F82303E5C82B1320000580
64084+:101A7000024020218E24002C8CF1001010910031A6
64085+:101A80000240202124020012A38200680E001A4A9C
64086+:101A90002412FFFF105200022404FFFF0000202147
64087+:101AA0008FBF001C8FB200188FB100148FB00010D0
64088+:101AB0000080102103E0000827BD002090A800D47A
64089+:101AC000350400200A001A80A0A400D400CA4824CB
64090+:101AD0001520000B8F8B00448F8D00448DAC0010BF
64091+:101AE0001580000B024020218E2E002C51C0FFECEF
64092+:101AF00000002021024020210A001A9B2402001726
64093+:101B00008D66001050C0FFE6000020210240202119
64094+:101B10000A001A9B24020011024020212402001511
64095+:101B20000E001A4AA3820068240FFFFF104FFFDC4B
64096+:101B30002404FFFF0A001A8A8E2600000A001AC138
64097+:101B4000240200143C08000400C8382450E0FFD4EC
64098+:101B500000002021024020210A001A9B24020013C9
64099+:101B60008F85003827BDFFD8AFB3001CAFB2001877
64100+:101B7000AFB10014AFB00010AFBF002090A700D4E9
64101+:101B80008F90004C2412FFFF34E2004092060000C8
64102+:101B9000A0A200D48E0300100080982110720006CD
64103+:101BA00030D1003F2408000D0E001A4AA3880068B7
64104+:101BB000105200252404FFFF8F8A00388E09001878
64105+:101BC0008D4400D01124000702602021240C000E57
64106+:101BD0000E001A4AA38C0068240BFFFF104B001A5A
64107+:101BE0002404FFFF24040020122400048F8D0038F9
64108+:101BF00091AF00D435EE0020A1AE00D48F85005403
64109+:101C000010A00019000000001224004A8F9800382C
64110+:101C10008F92FCC0971000809651000A5230004805
64111+:101C20008F9300403C1F08008FFF318C03E5C82BC9
64112+:101C30001720001E02602021000028210E0019A993
64113+:101C400024060001000020218FBF00208FB3001C5C
64114+:101C50008FB200188FB100148FB0001000801021D7
64115+:101C600003E0000827BD00285224002A8E05001436
64116+:101C70008F840038948A008025490001A48900805F
64117+:101C8000948800803C0208008C42318831077FFF35
64118+:101C900010E2000E00000000026020210E00193446
64119+:101CA000240500010A001B0B000020212402002D46
64120+:101CB0000E001A4AA38200682403FFFF1443FFE1C9
64121+:101CC0002404FFFF0A001B0C8FBF002094990080A2
64122+:101CD000241F800024050001033FC024A498008035
64123+:101CE00090920080908E0080325100FF001181C2DE
64124+:101CF00000107827000F69C031CC007F018D582576
64125+:101D0000A08B00800E001934026020210A001B0BFA
64126+:101D1000000020212406FFFF54A6FFD68F84003840
64127+:101D2000026020210E001934240500010A001B0B5B
64128+:101D300000002021026020210A001B252402000A45
64129+:101D40002404FFFD0A001B0BAF9300608F8800384E
64130+:101D500027BDFFE8AFB00010AFBF0014910A00D458
64131+:101D60008F87004C00808021354900408CE60010B0
64132+:101D7000A10900D43C0208008C4231B030C53FFFBD
64133+:101D800000A2182B106000078F850050240DFF80E3
64134+:101D900090AE000D01AE6024318B00FF156000088D
64135+:101DA0000006C382020020212403000D8FBF00140F
64136+:101DB0008FB0001027BD00180A001A4AA3830068DC
64137+:101DC00033060003240F000254CFFFF70200202146
64138+:101DD00094A2001C8F85003824190023A4A200E8D7
64139+:101DE0008CE8000000081E02307F003F13F9003528
64140+:101DF0003C0A00838CE800188CA600D0110600086D
64141+:101E0000000000002405000E0E001A4AA385006899
64142+:101E10002407FFFF104700182404FFFF8F850038B8
64143+:101E200090A900D435240020A0A400D48F8C0044B5
64144+:101E3000918E000D31CD007FA18D000D8F83005458
64145+:101E40001060001C020020218F8400508C9800102C
64146+:101E50000303782B11E0000D241900180200202143
64147+:101E6000A39900680E001A4A2410FFFF10500002C8
64148+:101E70002404FFFF000020218FBF00148FB000104A
64149+:101E80000080102103E0000827BD00188C86001098
64150+:101E90008F9F00440200202100C31023AFE20010F6
64151+:101EA000240500010E0019A9240600010A001B9751
64152+:101EB000000020210E001934240500010A001B97A0
64153+:101EC00000002021010A5824156AFFD98F8C004494
64154+:101ED000A0A600FC0A001B84A386005A30A500FFC0
64155+:101EE0002406000124A9000100C9102B1040000C99
64156+:101EF00000004021240A000100A61823308B0001B5
64157+:101F000024C60001006A3804000420421160000267
64158+:101F100000C9182B010740251460FFF800A61823FC
64159+:101F200003E000080100102127BDFFD8AFB0001862
64160+:101F30008F90004CAFB1001CAFBF00202403FFFF07
64161+:101F40002411002FAFA30010920600002405000802
64162+:101F500026100001006620260E001BB0308400FF12
64163+:101F600000021E003C021EDC34466F410A001BD8F2
64164+:101F70000000102110A00009008018212445000154
64165+:101F800030A2FFFF2C4500080461FFFA0003204047
64166+:101F90000086202614A0FFF9008018210E001BB037
64167+:101FA000240500208FA300102629FFFF313100FFF8
64168+:101FB00000034202240700FF1627FFE20102182651
64169+:101FC00000035027AFAA0014AFAA00100000302170
64170+:101FD00027A8001027A7001400E6782391ED00033E
64171+:101FE00024CE000100C8602131C600FF2CCB0004C4
64172+:101FF0001560FFF9A18D00008FA200108FBF002097
64173+:102000008FB1001C8FB0001803E0000827BD002826
64174+:1020100027BDFFD0AFB3001CAFB00010AFBF00288A
64175+:10202000AFB50024AFB40020AFB20018AFB10014B8
64176+:102030003C0C80008D880128240FFF803C06800A1C
64177+:1020400025100100250B0080020F68243205007F57
64178+:10205000016F7024AD8E009000A62821AD8D002464
64179+:1020600090A600FC3169007F3C0A8004012A1821F7
64180+:10207000A386005A9067007C00809821AF830030CF
64181+:1020800030E20002AF88006CAF85003800A0182154
64182+:10209000144000022404003424040030A3840048C7
64183+:1020A0008C7200DC30D100FF24040004AF92006089
64184+:1020B00012240004A38000688E7400041680001EA1
64185+:1020C0003C0880009386005930C7000110E0000FE3
64186+:1020D0008F9300608CB000848CA800842404FF805F
64187+:1020E000020410240002F940310A007F03EA482567
64188+:1020F0003C0C2000012C902530CD00FE3C038000DC
64189+:10210000AC720830A38D00598F9300608FBF0028F8
64190+:102110008FB50024ACB300DC8FB400208FB3001C5B
64191+:102120008FB200188FB100148FB00010240200018C
64192+:1021300003E0000827BD00308E7F000895020120D3
64193+:102140008E67001003E2C8213326FFFF30D8000F4E
64194+:1021500033150001AF87003416A00058A39800582B
64195+:1021600035090C000309382100D81823AD03008479
64196+:10217000AF8700648E6A00043148FFFF1100007EC3
64197+:10218000A78A005C90AC00D42407FF8000EC3024C8
64198+:1021900030CB00FF1560004B9786005C938E005A91
64199+:1021A000240D000230D5FFFF11CD02A20000A021B6
64200+:1021B0008F85006002A5802B160000BC9388004824
64201+:1021C0003C11800096240120310400FF1485008812
64202+:1021D0008F8400648F9800343312000356400085CA
64203+:1021E00030A500FF8F900064310C00FF24060034FE
64204+:1021F00011860095AF90004C9204000414800118E1
64205+:102200008F8E0038A380003C8E0D00048DC800D84E
64206+:102210003C0600FF34CCFFFF01AC30240106182B34
64207+:1022200014600120AF8600548F8700609798005C8F
64208+:10223000AF8700400307402310C000C7A788005C99
64209+:102240008F91003030C3000300035823922A007C92
64210+:102250003171000302261021000A20823092000111
64211+:102260000012488000492821311FFFFF03E5C82BD9
64212+:10227000132001208F8800388F8500348F880064F8
64213+:102280001105025A3C0E3F018E0600003C0C250051
64214+:1022900000CE682411AC01638F84004C30E500FF50
64215+:1022A0000E00184A000030218F8800388F870060A8
64216+:1022B0008F8500340A001DB78F8600540A001C5613
64217+:1022C000AF87006490A400D400E48024320200FFB1
64218+:1022D000104000169386005990A6008890AE00D753
64219+:1022E00024A8008830D4003F2686FFE02CD10020AF
64220+:1022F000A38E003C1220000CAF88004C240B000180
64221+:1023000000CB20043095001916A0012B3C0680005C
64222+:1023100034CF0002008FC0241700022E3099002015
64223+:1023200017200234000000009386005930CB0001D2
64224+:102330001160000F9788005C8CBF00848CA900841A
64225+:10234000240AFF8003EA6024000C19403132007F28
64226+:10235000007238253C0D200000EDC82530D800FE65
64227+:102360003C0F8000ADF90830A39800599788005CB5
64228+:102370001500FF84000000008E630020306200041E
64229+:102380001040FF51938600592404FFFB0064802411
64230+:102390003C038000AE700020346601808C7301B86D
64231+:1023A0000660FFFE8F98006C347501003C1400013C
64232+:1023B000ACD800008C6B012424076085ACCB0004F2
64233+:1023C0008EAE000401D488245220000124076083CB
64234+:1023D00024190002A4C700083C0F1000A0D9000B6C
64235+:1023E0003C068000ACCF01B80A001C2B9386005934
64236+:1023F00030A500FF0E00184A240600018F88006CEB
64237+:102400003C05800034A90900250201889388004812
64238+:10241000304A0007304B00783C0340802407FF809F
64239+:102420000163C825014980210047F824310C00FFD1
64240+:1024300024060034ACBF0800AF90004CACB90810C3
64241+:102440005586FF6E920400048F8400388E11003090
64242+:10245000908E00D431CD001015A000108F83006045
64243+:102460002C6F000515E000E400000000909800D4F7
64244+:102470002465FFFC331200101640000830A400FF52
64245+:102480008F9F00648F99003413F90004388700018E
64246+:1024900030E20001144001C8000000000E001BC320
64247+:1024A000000000000A001DF8000000008F84006496
64248+:1024B00030C500FF0E00184A24060001939800481A
64249+:1024C000240B0034130B00A08F8500388F8600602A
64250+:1024D0009783005C306EFFFF00CE8823AF910060D1
64251+:1024E000A780005C1280FF90028018212414FFFD59
64252+:1024F0005474FFA28E6300208E6A00042403FFBF81
64253+:102500002408FFEF0155F823AE7F000490AC00D4FF
64254+:102510003189007FA0A900D48E7200208F8F0038EF
64255+:10252000A780005C364D0002AE6D0020A5E000BC27
64256+:1025300091E500D400A3C824A1F900D48F950038F8
64257+:10254000AEA000EC92B800D403085824A2AB00D48B
64258+:102550000A001CD78F8500388F910034AF8000604F
64259+:1025600002275821AF8B0034000020212403FFFFF5
64260+:10257000108301B48F8500388E0C00103C0D0800CC
64261+:102580008DAD31B09208000031843FFF008D802B6B
64262+:1025900012000023310D003F3C1908008F3931A88B
64263+:1025A0008F9F006C000479802408FF80033F202166
64264+:1025B000008FC821938500590328F8243C06008029
64265+:1025C0003C0F800034D80001001F91403331007F60
64266+:1025D0008F8600380251502535EE0940332B0078A4
64267+:1025E000333000073C0310003C02800C017890253A
64268+:1025F000020E48210143C0250222382134AE0001D9
64269+:10260000ADFF0804AF890050ADF20814AF87004455
64270+:10261000ADFF0028ACD90084ADF80830A38E005976
64271+:102620009383005A24070003106700272407000142
64272+:102630001467FFAC8F8500382411002311B1008589
64273+:1026400000000000240E000B026020210E001A4A38
64274+:10265000A38E00680040A0210A001D328F8500383B
64275+:1026600002602021240B000C0E001A4AA38B006884
64276+:10267000240AFFFF104AFFBD2404FFFF8F8E00389D
64277+:10268000A380003C8E0D00048DC800D83C0600FFDE
64278+:1026900034CCFFFF01AC30240106182B1060FEE2A1
64279+:1026A000AF86005402602021241200190E001A4A3D
64280+:1026B000A3920068240FFFFF104FFFAC2404FFFF1C
64281+:1026C0000A001C838F86005425A3FFE02C74002091
64282+:1026D0001280FFDD240E000B000328803C1108014E
64283+:1026E000263194B400B148218D2D000001A00008CE
64284+:1026F000000000008F85003400A710219385003C66
64285+:10270000AF82003402251821A383003C951F00BC32
64286+:102710000226282137F91000A51900BC5240FF926B
64287+:10272000AF850060246A0004A38A003C950900BCC0
64288+:1027300024A40004AF84006035322000A51200BC40
64289+:102740000A001D54000020218F8600602CC800055F
64290+:102750001500FF609783005C3065FFFF00C5C8234C
64291+:102760002F2F000511E00003306400FF24CDFFFC93
64292+:1027700031A400FF8F8900648F920034113200046D
64293+:10278000389F000133EC0001158001380000000083
64294+:102790008F840038908700D434E60010A08600D4DF
64295+:1027A0008F8500388F8600609783005CACA000ECBA
64296+:1027B0000A001D2F306EFFFF8CB500848CB400849E
64297+:1027C0003C04100002A7302400068940328E007FAE
64298+:1027D000022E8025020410253C08800024050001FB
64299+:1027E00002602021240600010E0019A9AD02083064
64300+:1027F0000A001CC38F8500388C8200EC1222FE7EFA
64301+:102800000260202124090005A38900680E001A4AED
64302+:102810002411FFFF1451FE782404FFFF0A001D5508
64303+:102820002403FFFF8F8F004C8F8800388DF8000045
64304+:10283000AD1800888DE70010AD0700988F87006005
64305+:102840000A001DB78F8600542406FFFF118600057D
64306+:10285000000000000E001B4C026020210A001D8FAA
64307+:102860000040A0210E001AD1026020210A001D8F15
64308+:102870000040A0218F90004C3C0208008C4231B0F7
64309+:102880008E110010322C3FFF0182282B10A0000C6B
64310+:10289000240BFF808F85005090A3000D01637024EE
64311+:1028A00031CA00FF1140000702602021001143825D
64312+:1028B000310600032418000110D8010600000000B2
64313+:1028C000026020212403000D0E001A4AA383006831
64314+:1028D000004020218F8500380A001D320080A02191
64315+:1028E0008F90004C3C0A08008D4A31B08F85005013
64316+:1028F0008E0400100000A0218CB1001430823FFF34
64317+:10290000004A602B8CB200205180FFEE0260202133
64318+:1029100090B8000D240BFF800178702431C300FFB4
64319+:102920005060FFE80260202100044382310600036A
64320+:1029300014C0FFE40260202194BF001C8F9900386E
64321+:102940008E060028A73F00E88CAF0010022F20233E
64322+:1029500014C4013A026020218F83005400C368210F
64323+:10296000022D382B14E00136240200188F8A00440F
64324+:102970008F820030024390218D4B00100163702341
64325+:10298000AD4E0010AD5200208C4C00740192282BEB
64326+:1029900014A0015F026020218F8400508E08002463
64327+:1029A0008C86002411060007026020212419001CD7
64328+:1029B0000E001A4AA3990068240FFFFF104FFFC5AD
64329+:1029C0002404FFFF8F8400448C87002424FF00012F
64330+:1029D000AC9F00241251012F8F8D00308DB10074F7
64331+:1029E0001232012C3C0B00808E0E000001CB5024D3
64332+:1029F00015400075000000008E0300142411FFFF35
64333+:102A0000107100073C0808003C0608008CC6319095
64334+:102A100000C8C0241300015202602021A380006876
64335+:102A20008E0300003C19000100792024108000135F
64336+:102A30003C1F0080007FA02416800009020028218E
64337+:102A4000026020212411001A0E001A4AA391006886
64338+:102A50002407FFFF1047FF9F2404FFFF02002821E7
64339+:102A6000026020210E001A6A240600012410FFFFD4
64340+:102A70001050FF982404FFFF241400018F8D0044A0
64341+:102A8000026020210280302195A900342405000134
64342+:102A9000253200010E0019A9A5B200340000202142
64343+:102AA0008F8500380A001D320080A0218F90004CD5
64344+:102AB0003C1408008E9431B08E07001030E53FFFC3
64345+:102AC00000B4C82B132000618F8600502412FF80B1
64346+:102AD00090C9000D0249682431A400FF5080005CB9
64347+:102AE000026020218F8C00541180000700078B8228
64348+:102AF0008F8500388F82FCC094BF0080944A000A02
64349+:102B0000515F00F78F8600403227000314E0006415
64350+:102B100000000000920E000211C000D8000000006A
64351+:102B20008E0B0024156000D902602021920400035E
64352+:102B300024190002308500FF14B90005308900FF18
64353+:102B40008F940054128000EA240D002C308900FF7D
64354+:102B5000392C00102D8400012D3200010244302553
64355+:102B6000020028210E001A6A026020212410FFFFB3
64356+:102B7000105000BF8F8500388F830054106000D341
64357+:102B8000240500013C0A08008D4A318C0143F82BD2
64358+:102B900017E000B22402002D02602021000028214D
64359+:102BA0000E0019A9240600018F85003800001821A5
64360+:102BB0000A001D320060A0210E0018750000000000
64361+:102BC0000A001DF800000000AC8000200A001E78FA
64362+:102BD0008E03001400002821026020210E0019A994
64363+:102BE000240600010A001CC38F8500380A001DB7A7
64364+:102BF0008F8800388CAA00848CAC00843C031000C1
64365+:102C00000147F824001F91403189007F024968255F
64366+:102C100001A32825ACC50830910700012405000157
64367+:102C2000026020210E0019A930E600010A001CC331
64368+:102C30008F850038938F00482403FFFD0A001D3460
64369+:102C4000AF8F00600A001D342403FFFF02602021C3
64370+:102C50002410000D0E001A4AA390006800401821AD
64371+:102C60008F8500380A001D320060A0210E00187503
64372+:102C7000000000009783005C8F86006000402021E8
64373+:102C80003070FFFF00D010232C4A00051140FE11C8
64374+:102C90008F850038ACA400EC0A001D2F306EFFFFBA
64375+:102CA00090CF000D31E300085460FFA192040003AF
64376+:102CB00002602021240200100E001A4AA38200683C
64377+:102CC0002403FFFF5443FF9A920400030A001F12DB
64378+:102CD0008F85003890A4000D308F000811E000951A
64379+:102CE0008F990054572000A6026020218E1F000CEF
64380+:102CF0008CB4002057F40005026020218E0D0008DE
64381+:102D00008CA7002411A7003A026020212402002091
64382+:102D1000A38200680E001A4A2412FFFF1052FEED33
64383+:102D20002404FFFF8F9F00442402FFF73C14800E11
64384+:102D300093EA000D2419FF803C03800001423824EF
64385+:102D4000A3E7000D8F9F00303C0908008D2931ACAE
64386+:102D50008F8C006C97F200788F870044012C302113
64387+:102D6000324D7FFF000D204000C4782131E5007F07
64388+:102D700000B4C02101F94024AC68002CA711000068
64389+:102D80008CEB0028256E0001ACEE00288CEA002CAC
64390+:102D90008E02002C01426021ACEC002C8E09002C2C
64391+:102DA000ACE900308E120014ACF2003494ED003A1D
64392+:102DB00025A40001A4E4003A97E600783C1108003D
64393+:102DC0008E3131B024C3000130707FFF1211005CDE
64394+:102DD000006030218F8F0030026020212405000127
64395+:102DE0000E001934A5E600780A001EA1000020217B
64396+:102DF0008E0900142412FFFF1132006B8F8A0038F5
64397+:102E00008E0200188D4C00D0144C00650260202109
64398+:102E10008E0B00248CAE0028116E005B2402002172
64399+:102E20000E001A4AA38200681452FFBE2404FFFF5A
64400+:102E30008F8500380A001D320080A0212402001F67
64401+:102E40000E001A4AA38200682409FFFF1049FEA160
64402+:102E50002404FFFF0A001E548F83005402602021C7
64403+:102E60000E001A4AA38200681450FF508F85003864
64404+:102E70002403FFFF0A001D320060A0218CD800242B
64405+:102E80008E0800241118FF29026020210A001F2744
64406+:102E90002402000F8E0900003C05008001259024CB
64407+:102EA0001640FF492402001A026020210E001A4A2F
64408+:102EB000A3820068240CFFFF144CFECF2404FFFF04
64409+:102EC0008F8500380A001D320080A0210E001934C1
64410+:102ED000026020218F8500380A001EE500001821BD
64411+:102EE0002403FFFD0060A0210A001D32AF860060B0
64412+:102EF000026020210E001A4AA38D00682403FFFF00
64413+:102F00001043FF588F8500380A001ECC920400033E
64414+:102F10002418001D0E001A4AA39800682403FFFF1E
64415+:102F20001443FE9D2404FFFF8F8500380A001D32E4
64416+:102F30000080A021026020210A001F3D24020024FD
64417+:102F4000240880000068C024330BFFFF000B73C20D
64418+:102F500031D000FF001088270A001F6E001133C017
64419+:102F6000240F001B0E001A4AA38F00681451FEACF8
64420+:102F70002404FFFF8F8500380A001D320080A02145
64421+:102F80000A001F3D240200278E0600288CA3002C77
64422+:102F900010C30008026020210A001F812402001FC4
64423+:102FA0000A001F812402000E026020210A001F81F6
64424+:102FB000240200258E04002C1080000D8F8F00301D
64425+:102FC0008DE800740104C02B5700000C0260202122
64426+:102FD0008CB900140086A0210334282B10A0FF52C6
64427+:102FE0008F9F0044026020210A001F8124020022DA
64428+:102FF000026020210A001F81240200230A001F8191
64429+:103000002402002627BDFFD8AFB3001CAFB10014C7
64430+:10301000AFBF0020AFB20018AFB000103C0280007C
64431+:103020008C5201408C4B01483C048000000B8C0208
64432+:10303000322300FF317300FF8C8501B804A0FFFE2E
64433+:1030400034900180AE1200008C8701442464FFF0AC
64434+:10305000240600022C830013AE070004A61100080A
64435+:10306000A206000BAE1300241060004F8FBF00209B
64436+:10307000000448803C0A0801254A9534012A402171
64437+:103080008D04000000800008000000003C030800E0
64438+:103090008C6331A831693FFF00099980007280215B
64439+:1030A000021370212405FF80264D0100264C00806C
64440+:1030B0003C02800031B1007F3198007F31CA007F2F
64441+:1030C0003C1F800A3C1980043C0F800C01C5202461
64442+:1030D00001A5302401853824014F1821AC46002475
64443+:1030E000023F402103194821AC470090AC4400281E
64444+:1030F000AF830044AF880038AF8900300E0019005C
64445+:10310000016080213C0380008C6B01B80560FFFEEC
64446+:103110008F8700448F8600383465018090E8000D69
64447+:10312000ACB20000A4B0000600082600000416039C
64448+:1031300000029027001227C21080008124C200885C
64449+:10314000241F6082A4BF0008A0A000052402000282
64450+:10315000A0A2000B8F8B0030000424003C08270045
64451+:1031600000889025ACB20010ACA00014ACA00024E4
64452+:10317000ACA00028ACA0002C8D6900382413FF807F
64453+:10318000ACA9001890E3000D02638024320500FF13
64454+:1031900010A000058FBF002090ED000D31AC007F26
64455+:1031A000A0EC000D8FBF00208FB3001C8FB2001861
64456+:1031B0008FB100148FB000103C0A10003C0E80004C
64457+:1031C00027BD002803E00008ADCA01B8265F010052
64458+:1031D0002405FF8033F8007F3C06800003E5782457
64459+:1031E0003C19800A03192021ACCF0024908E00D412
64460+:1031F00000AE682431AC00FF11800024AF84003899
64461+:10320000248E008895CD00123C0C08008D8C31A8CE
64462+:1032100031AB3FFF01924821000B5180012A402130
64463+:1032200001052024ACC400283107007F3C06800C37
64464+:1032300000E620219083000D00A31024304500FFFC
64465+:1032400010A0FFD8AF8400449098000D330F0010F9
64466+:1032500015E0FFD58FBF00200E0019000000000010
64467+:103260003C0380008C7901B80720FFFE00000000BD
64468+:10327000AE1200008C7F0144AE1F0004A6110008AE
64469+:1032800024110002A211000BAE1300243C1308010C
64470+:10329000927396F0327000015200FFC38FBF00207E
64471+:1032A0000E002146024020210A0020638FBF00202B
64472+:1032B0003C1260008E452C083C03F0033462FFFF93
64473+:1032C00000A2F824AE5F2C088E582C083C1901C0CF
64474+:1032D00003199825AE532C080A0020638FBF0020E5
64475+:1032E000264D010031AF007F3C10800A240EFF8084
64476+:1032F00001F0282101AE60243C0B8000AD6C00245D
64477+:103300001660FFA8AF85003824110003A0B100FCAF
64478+:103310000A0020638FBF002026480100310A007F89
64479+:103320003C0B800A2409FF80014B30210109202435
64480+:103330003C078000ACE400240A002062AF8600381D
64481+:10334000944E0012320C3FFF31CD3FFF15ACFF7D94
64482+:10335000241F608290D900D42418FF800319782498
64483+:1033600031EA00FF1140FF7700000000240700044D
64484+:10337000A0C700FC8F870044241160842406000D40
64485+:10338000A4B10008A0A600050A00204D24020002F6
64486+:103390003C040001248496DC24030014240200FE73
64487+:1033A0003C010800AC2431EC3C010800AC2331E8BE
64488+:1033B0003C010801A42296F83C040801248496F8F4
64489+:1033C0000000182100643021A0C300042463000120
64490+:1033D0002C6500FF54A0FFFC006430213C0708006E
64491+:1033E00024E7010003E00008AF87007800A058211F
64492+:1033F000008048210000102114A00012000050217C
64493+:103400000A002142000000003C010801A42096F8B7
64494+:103410003C05080194A596F88F8200783C0C0801C1
64495+:10342000258C96F800E2182100AC2021014B302BAE
64496+:10343000A089000400001021A460000810C0003919
64497+:10344000010048218F8600780009384000E94021BA
64498+:103450000008388000E6282190A8000B90B9000AE7
64499+:103460000008204000881021000218800066C0215A
64500+:10347000A319000A8F85007800E5782191EE000AF3
64501+:1034800091E6000B000E684001AE6021000C208028
64502+:1034900000851021A046000B3C030801906396F2C2
64503+:1034A000106000222462FFFF8F8300383C01080176
64504+:1034B000A02296F2906C00FF118000040000000032
64505+:1034C000906E00FF25CDFFFFA06D00FF3C190801A5
64506+:1034D000973996F8272300013078FFFF2F0F00FF60
64507+:1034E00011E0FFC9254A00013C010801A42396F818
64508+:1034F0003C05080194A596F88F8200783C0C0801E1
64509+:10350000258C96F800E2182100AC2021014B302BCD
64510+:10351000A089000400001021A460000814C0FFC9A5
64511+:103520000100482103E000080000000003E000085B
64512+:103530002402000227BDFFE0248501002407FF804C
64513+:10354000AFB00010AFBF0018AFB1001400A718242F
64514+:103550003C10800030A4007F3C06800A00862821B1
64515+:103560008E110024AE03002490A200FF1440000836
64516+:10357000AF850038A0A000098FBF0018AE1100244D
64517+:103580008FB100148FB0001003E0000827BD0020A9
64518+:1035900090A900FD90A800FF312400FF0E0020F448
64519+:1035A000310500FF8F8500388FBF0018A0A00009EB
64520+:1035B000AE1100248FB100148FB0001003E000089A
64521+:1035C00027BD002027BDFFD0AFB20020AFB1001C47
64522+:1035D000AFB00018AFBF002CAFB40028AFB30024C9
64523+:1035E0003C0980009533011635320C00952F011AE5
64524+:1035F0003271FFFF023280218E08000431EEFFFF9E
64525+:10360000248B0100010E6821240CFF8025A5FFFFFB
64526+:10361000016C50243166007F3C07800AAD2A0024EB
64527+:1036200000C73021AF850074AF8800703C010801ED
64528+:10363000A02096F190C300090200D02100809821BB
64529+:10364000306300FF2862000510400048AF86003854
64530+:10365000286400021480008E24140001240D00054B
64531+:103660003C010801A02D96D590CC00FD3C0108013D
64532+:10367000A02096D63C010801A02096D790CB000A46
64533+:10368000240AFF80318500FF014B4824312700FFC9
64534+:1036900010E0000C000058213C12800836510080D8
64535+:1036A0008E2F00308CD0005C01F0702305C0018E9D
64536+:1036B0008F87007090D4000A3284007FA0C4000A73
64537+:1036C0008F8600383C118008363000808E0F003025
64538+:1036D0008F87007000EF702319C000EE000000001B
64539+:1036E00090D4000924120002328400FF1092024795
64540+:1036F000000000008CC2005800E2F82327F9FFFF09
64541+:103700001B2001300000000090C5000924080004BF
64542+:1037100030A300FF10680057240A00013C01080193
64543+:10372000A02A96D590C900FF252700013C01080179
64544+:10373000A02796D43C030801906396D52406000583
64545+:103740001066006A2C780005130000C40000902168
64546+:103750000003F8803C0408012484958003E4C82118
64547+:103760008F25000000A0000800000000241800FFC2
64548+:103770001078005C0000000090CC000A90CA00099C
64549+:103780003C080801910896F13187008000EA48253D
64550+:103790003C010801A02996DC90C500FD3C140801FD
64551+:1037A000929496F2311100013C010801A02596DDAA
64552+:1037B00090DF00FE3C010801A03F96DE90D200FFA2
64553+:1037C0003C010801A03296DF8CD900543C0108016D
64554+:1037D000AC3996E08CD000583C010801AC3096E43E
64555+:1037E0008CC3005C3C010801AC3496EC3C01080140
64556+:1037F000AC2396E8162000088FBF002C8FB4002859
64557+:103800008FB300248FB200208FB1001C8FB000183E
64558+:1038100003E0000827BD00303C1180009624010E13
64559+:103820000E000FD43094FFFF3C0B08018D6B96F413
64560+:103830000260382102802821AE2B01803C13080150
64561+:103840008E7396D401602021240600830E00102F71
64562+:10385000AFB300108FBF002C8FB400288FB30024AB
64563+:103860008FB200208FB1001C8FB0001803E0000859
64564+:1038700027BD00303C1808008F1831FC270F0001CD
64565+:103880003C010800AC2F31FC0A0021D700000000E9
64566+:103890001474FFB900000000A0C000FF3C05080040
64567+:1038A0008CA531E43C0308008C6331E03C02080045
64568+:1038B0008C4232048F99003834A80001241F000282
64569+:1038C0003C010801AC2396F43C010801A02896F0C5
64570+:1038D0003C010801A02296F3A33F00090A002190B1
64571+:1038E0008F8600380E002146000000000A0021D714
64572+:1038F0008F8600383C1F080193FF96D424190001DD
64573+:1039000013F902298F8700703C100801921096D895
64574+:103910003C06080190C696D610C000050200A02102
64575+:103920003C040801908496D9109001E48F870078B8
64576+:10393000001088408F9F0078023048210009C8801D
64577+:10394000033F702195D80008270F0001A5CF00087C
64578+:103950003C040801908496D93C05080190A596D6B0
64579+:103960000E0020F4000000008F8700780230202134
64580+:103970000004308000C720218C8500048F820074F1
64581+:1039800000A2402305020006AC8200048C8A0000DD
64582+:103990008F830070014310235C400001AC83000062
64583+:1039A0008F86003890CB00FF2D6C00025580002DD3
64584+:1039B000241400010230F821001F40800107282153
64585+:1039C00090B9000B8CAE00040019C0400319782197
64586+:1039D000000F1880006710218C4D000001AE882375
64587+:1039E0002630FFFF5E00001F241400018C440004F9
64588+:1039F0008CAA0000008A482319200019240E000414
64589+:103A00003C010801A02E96D590AD000B8CAB0004B4
64590+:103A1000000D8840022D80210010108000471021E9
64591+:103A20008C44000401646023058202009443000872
64592+:103A300090DF00FE90B9000B33E500FF54B900049D
64593+:103A40000107A021A0D400FE8F8700780107A021E4
64594+:103A50009284000B0E0020F4240500018F860038AC
64595+:103A600024140001125400962E500001160000424A
64596+:103A70003C08FFFF241900021659FF3F0000000018
64597+:103A8000A0C000FF8F860038A0D200090A0021D70D
64598+:103A90008F86003890C700092404000230E300FF3D
64599+:103AA0001064016F24090004106901528F880074AA
64600+:103AB0008CCE0054010E682325B10001062001754B
64601+:103AC000241800043C010801A03896D53C010801E7
64602+:103AD000A02096D490D400FD90D200FF2E4F00027B
64603+:103AE00015E0FF14328400FF000438408F8900780D
64604+:103AF00090DF00FF00E41021000220800089C8212F
64605+:103B00002FE500029324000B14A0FF0A24070002F3
64606+:103B100000041840006480210010588001692821A9
64607+:103B20008CAC0004010C50230540FF020000000093
64608+:103B30003C030801906396D614600005246F0001D1
64609+:103B40003C010801A02496D93C010801A02796D782
64610+:103B50003C010801A02F96D690CE00FF24E700017B
64611+:103B600031CD00FF01A7882B1220FFE990A4000BA4
64612+:103B70000A0021C6000000003C0508018CA596D46F
64613+:103B80003C12000400A8F82413F2000624020005E9
64614+:103B90003C090801912996D5152000022402000352
64615+:103BA000240200053C010801A02296F190C700FF05
64616+:103BB00014E0012024020002A0C200090A0021D75B
64617+:103BC0008F86003890CC00FF1180FEDA240A0001B5
64618+:103BD0008F8C00748F890078240F00030180682186
64619+:103BE0001160001E240E0002000540400105A021C6
64620+:103BF00000142080008990218E51000401918023BF
64621+:103C00000600FECC000000003C020801904296D65F
64622+:103C100014400005245800013C010801A02A96D751
64623+:103C20003C010801A02596D93C010801A03896D690
64624+:103C300090DF00FF010510210002C88033E500FF7E
64625+:103C4000254A00010329202100AA402B1500FEB9B6
64626+:103C50009085000B1560FFE50005404000054040E1
64627+:103C600001051821000310803C010801A02A96D408
64628+:103C70003C010801A02596D8004918218C64000455
64629+:103C800000E4F82327F9FFFF1F20FFE900000000F0
64630+:103C90008C63000000E358230560013A01A38823E8
64631+:103CA00010E301170184C0231B00FEA200000000E6
64632+:103CB0003C010801A02E96D50A002305240B000123
64633+:103CC000240E0004A0CE00093C0D08008DAD31F893
64634+:103CD0008F86003825A200013C010800AC2231F893
64635+:103CE0000A0021D7000000008CD9005C00F9C02335
64636+:103CF0001F00FE7B000000008CDF005C10FFFF65F2
64637+:103D00008F8400748CC3005C008340232502000173
64638+:103D10001C40FF60000000008CC9005C248700018B
64639+:103D200000E9282B10A0FE943C0D80008DAB01040F
64640+:103D30003C0C0001016C50241140FE8F2402001045
64641+:103D40003C010801A02296F10A0021D700000000E2
64642+:103D50008F9100748F86003826220001ACC2005C6F
64643+:103D60000A002292241400018F8700382404FF8067
64644+:103D70000000882190E9000A241400010124302564
64645+:103D8000A0E6000A3C05080190A596D63C0408016F
64646+:103D9000908496D90E0020F4000000008F86003831
64647+:103DA0008F85007890C800FD310700FF0007404074
64648+:103DB0000107F821001FC0800305C8219323000BD1
64649+:103DC000A0C300FD8F8500788F8600380305602131
64650+:103DD000918F000B000F704001CF6821000D808093
64651+:103DE000020510218C4B0000ACCB00548D840004E4
64652+:103DF0008F83007400645023194000022482000164
64653+:103E00002462000101074821ACC2005C0009308037
64654+:103E100000C5402100E02021240500010E0020F40F
64655+:103E20009110000B8F86003890C500FF10A0FF0C8A
64656+:103E3000001070408F85007801D06821000D10803F
64657+:103E4000004558218D6400008F8C0074018450233C
64658+:103E50002547000104E0FF02263100013C03080170
64659+:103E6000906396D62E2F0002247800013C010801B1
64660+:103E7000A03896D63C010801A03496D711E0FEF890
64661+:103E8000020038210A002365000740408F84003873
64662+:103E90008F8300748C85005800A340230502FE9A8E
64663+:103EA000AC8300580A00223B000000003C070801D8
64664+:103EB00090E796F2240200FF10E200BE8F860038E1
64665+:103EC0003C110801963196FA3C030801246396F8E8
64666+:103ED000262500013230FFFF30ABFFFF02036021D7
64667+:103EE0002D6A00FF1540008D918700043C010801F8
64668+:103EF000A42096FA8F88003800074840012728211F
64669+:103F0000911800FF000530802405000127140001EE
64670+:103F1000A11400FF3C120801925296F28F8800789B
64671+:103F20008F8E0070264F000100C820213C0108013F
64672+:103F3000A02F96F2AC8E00008F8D0074A48500082F
64673+:103F4000AC8D00043C030801906396D414600077A4
64674+:103F5000000090213C010801A02596D4A087000B09
64675+:103F60008F8C007800CC5021A147000A8F82003846
64676+:103F7000A04700FD8F840038A08700FE8F860038A0
64677+:103F80008F9F0070ACDF00548F990074ACD900583B
64678+:103F90008F8D00780127C02100185880016DA02165
64679+:103FA000928F000A000F704001CF18210003888013
64680+:103FB000022D8021A207000B8F8600780166602108
64681+:103FC000918A000B000A1040004A2021000428803A
64682+:103FD00000A64021A107000A3C07800834E90080C0
64683+:103FE0008D2200308F860038ACC2005C0A0022921D
64684+:103FF0002414000190CA00FF1540FEAD8F880074A4
64685+:10400000A0C400090A0021D78F860038A0C000FD97
64686+:104010008F98003824060001A30000FE3C0108012F
64687+:10402000A02696D53C010801A02096D40A0021C6FE
64688+:104030000000000090CB00FF3C040801908496F340
64689+:10404000316C00FF0184502B1540000F2402000347
64690+:1040500024020004A0C200090A0021D78F8600387C
64691+:1040600090C3000A2410FF8002035824316C00FF23
64692+:104070001180FDC1000000003C010801A02096D580
64693+:104080000A0021C600000000A0C200090A0021D7D2
64694+:104090008F86003890D4000A2412FF8002544824EE
64695+:1040A000312800FF1500FFF4240200083C0108013C
64696+:1040B000A02296F10A0021D70000000000108840DD
64697+:1040C0008F8B0070023018210003688001A7202127
64698+:1040D000AC8B00008F8A0074240C0001A48C0008B3
64699+:1040E000AC8A00043C05080190A596D62402000184
64700+:1040F00010A2FE1E24A5FFFF0A0022519084000B8F
64701+:104100000184A0231A80FD8B000000003C010801FF
64702+:10411000A02E96D50A002305240B00013C010801BE
64703+:10412000A42596FA0A0023B78F880038240B0001D3
64704+:10413000106B00228F9800388F85003890BF00FFE9
64705+:1041400033F900FF1079002B000000003C1F08012C
64706+:1041500093FF96D8001FC840033FC0210018A080DD
64707+:104160000288782191EE000AA08E000A8F8D0078D7
64708+:104170003C030801906396D800CD88210A0023DD16
64709+:10418000A223000B263000010600003101A4902379
64710+:104190000640002B240200033C010801A02F96D505
64711+:1041A0000A002305240B00018F8900380A00223BF6
64712+:1041B000AD2700540A00229124120001931400FD3F
64713+:1041C000A094000B8F8800388F8F0078910E00FE2E
64714+:1041D00000CF6821A1AE000A8F910038A22700FD10
64715+:1041E0008F8300708F900038AE0300540A0023DEE6
64716+:1041F0008F8D007890B000FEA090000A8F8B003861
64717+:104200008F8C0078916A00FD00CC1021A04A000B31
64718+:104210008F840038A08700FE8F8600748F85003859
64719+:10422000ACA600580A0023DE8F8D007894B80008F1
64720+:10423000ACA40004030378210A002285A4AF00087F
64721+:104240003C010801A02296D50A0021C6000000000A
64722+:1042500090CF0009240D000431EE00FF11CDFD8543
64723+:10426000240200013C010801A02296D50A0021C6C3
64724+:1042700000000000080033440800334408003420E4
64725+:10428000080033F4080033D8080033280800332826
64726+:10429000080033280800334C8008010080080080A3
64727+:1042A000800800005F865437E4AC62CC50103A4579
64728+:1042B00036621985BF14C0E81BC27A1E84F4B55655
64729+:1042C000094EA6FE7DDA01E7C04D748108005A74DC
64730+:1042D00008005AB808005A5C08005A5C08005A5C8A
64731+:1042E00008005A5C08005A7408005A5C08005A5CBE
64732+:1042F00008005AC008005A5C080059D408005A5CEB
64733+:1043000008005A5C08005AC008005A5C08005A5C51
64734+:1043100008005A5C08005A5C08005A5C08005A5CA5
64735+:1043200008005A5C08005A5C08005A5C08005A5C95
64736+:1043300008005A9408005A5C08005A9408005A5C15
64737+:1043400008005A5C08005A5C08005A9808005A9401
64738+:1043500008005A5C08005A5C08005A5C08005A5C65
64739+:1043600008005A5C08005A5C08005A5C08005A5C55
64740+:1043700008005A5C08005A5C08005A5C08005A5C45
64741+:1043800008005A5C08005A5C08005A5C08005A5C35
64742+:1043900008005A5C08005A5C08005A5C08005A5C25
64743+:1043A00008005A9808005A9808005A5C08005A9861
64744+:1043B00008005A5C08005A5C08005A5C08005A5C05
64745+:1043C00008005A5C08005A5C08005A5C08005A5CF5
64746+:1043D00008005A5C08005A5C08005A5C08005A5CE5
64747+:1043E00008005A5C08005A5C08005A5C08005A5CD5
64748+:1043F00008005A5C08005A5C08005A5C08005A5CC5
64749+:1044000008005A5C08005A5C08005A5C08005A5CB4
64750+:1044100008005A5C08005A5C08005A5C08005A5CA4
64751+:1044200008005A5C08005A5C08005A5C08005A5C94
64752+:1044300008005A5C08005A5C08005A5C08005A5C84
64753+:1044400008005A5C08005A5C08005A5C08005A5C74
64754+:1044500008005A5C08005A5C08005A5C08005A5C64
64755+:1044600008005A5C08005A5C08005A5C08005A5C54
64756+:1044700008005A5C08005A5C08005A5C08005A5C44
64757+:1044800008005A5C08005A5C08005A5C08005A5C34
64758+:1044900008005A5C08005A5C08005A5C08005A5C24
64759+:1044A00008005A5C08005A5C08005A5C08005A5C14
64760+:1044B00008005A5C08005A5C08005A5C08005A5C04
64761+:1044C00008005A5C08005A5C08005A5C08005ADC74
64762+:1044D0000800782C08007A900800783808007628C0
64763+:1044E00008007838080078C4080078380800762872
64764+:1044F0000800762808007628080076280800762824
64765+:104500000800762808007628080076280800762813
64766+:1045100008007628080078580800784808007628AF
64767+:1045200008007628080076280800762808007628F3
64768+:1045300008007628080076280800762808007628E3
64769+:1045400008007628080076280800762808007848B1
64770+:10455000080082FC08008188080082C40800818865
64771+:104560000800829408008070080081880800818813
64772+:1045700008008188080081880800818808008188F7
64773+:1045800008008188080081880800818808008188E7
64774+:104590000800818808008188080081B008008D34F7
64775+:1045A00008008E9008008E70080088D808008D4C96
64776+:1045B0000A00012400000000000000000000000DBF
64777+:1045C000747061362E322E31620000000602010145
64778+:1045D00000000000000000000000000000000000DB
64779+:1045E00000000000000000000000000000000000CB
64780+:1045F00000000000000000000000000000000000BB
64781+:1046000000000000000000000000000000000000AA
64782+:10461000000000000000000000000000000000009A
64783+:10462000000000000000000000000000000000008A
64784+:10463000000000000000000000000000000000007A
64785+:104640000000000010000003000000000000000D4A
64786+:104650000000000D3C020800244217203C03080023
64787+:1046600024632A10AC4000000043202B1480FFFD7F
64788+:10467000244200043C1D080037BD2FFC03A0F0219C
64789+:104680003C100800261004903C1C0800279C1720B2
64790+:104690000E000262000000000000000D2402FF80F6
64791+:1046A00027BDFFE000821024AFB00010AF42002011
64792+:1046B000AFBF0018AFB10014936500043084007FD1
64793+:1046C000034418213C0200080062182130A5002094
64794+:1046D000036080213C080111277B000814A0000220
64795+:1046E0002466005C2466005892020004974301048B
64796+:1046F000920400043047000F3063FFFF3084004015
64797+:10470000006728231080000900004821920200055C
64798+:1047100030420004104000050000000010A000031B
64799+:104720000000000024A5FFFC2409000492020005FB
64800+:1047300030420004104000120000000010A00010E1
64801+:10474000000000009602000200A72021010440257D
64802+:104750002442FFFEA7421016920300042402FF80A9
64803+:1047600000431024304200FF104000033C020400CC
64804+:104770000A000174010240258CC20000AF421018EB
64805+:104780008F4201780440FFFE2402000AA742014044
64806+:1047900096020002240400093042000700021023A0
64807+:1047A00030420007A7420142960200022442FFFE67
64808+:1047B000A7420144A740014697420104A74201488D
64809+:1047C0008F420108304200205040000124040001C3
64810+:1047D00092020004304200101440000234830010A2
64811+:1047E00000801821A743014A0000000000000000DB
64812+:1047F0000000000000000000AF48100000000000B2
64813+:104800000000000000000000000000008F421000C7
64814+:104810000441FFFE3102FFFF1040000700000000CE
64815+:1048200092020004304200401440000300000000E7
64816+:104830008F421018ACC20000960200063042FFFF03
64817+:10484000244200020002104300021040036288214B
64818+:10485000962200001120000D3044FFFF00A7102118
64819+:104860008F8300388F45101C0002108200021080D8
64820+:1048700000431021AC45000030A6FFFF0E00058D5F
64821+:1048800000052C0200402021A62200009203000413
64822+:104890002402FF8000431024304200FF1040001F1C
64823+:1048A0000000000092020005304200021040001B90
64824+:1048B000000000009742100C2442FFFEA742101691
64825+:1048C000000000003C02040034420030AF421000FF
64826+:1048D00000000000000000000000000000000000D8
64827+:1048E0008F4210000441FFFE000000009742100CB0
64828+:1048F0008F45101C3042FFFF24420030000210821E
64829+:1049000000021080005B1021AC45000030A6FFFFC4
64830+:104910000E00058D00052C02A62200009604000260
64831+:10492000248400080E0001E93084FFFF974401044D
64832+:104930000E0001F73084FFFF8FBF00188FB1001405
64833+:104940008FB000103C02100027BD002003E00008DB
64834+:10495000AF4201783084FFFF308200078F8500244A
64835+:1049600010400002248300073064FFF800A41021E7
64836+:1049700030421FFF03421821247B4000AF850028EE
64837+:10498000AF82002403E00008AF4200843084FFFFC0
64838+:104990003082000F8F85002C8F860034104000027B
64839+:1049A0002483000F3064FFF000A410210046182B70
64840+:1049B000AF8500300046202314600002AF82002C37
64841+:1049C000AF84002C8F82002C340480000342182115
64842+:1049D00000641821AF83003803E00008AF42008074
64843+:1049E0008F820014104000088F8200048F82FFDC49
64844+:1049F000144000058F8200043C02FFBF3442FFFFD9
64845+:104A0000008220248F82000430430006240200022A
64846+:104A10001062000F3C0201012C62000350400005AF
64847+:104A2000240200041060000F3C0200010A00023062
64848+:104A30000000000010620005240200061462000C51
64849+:104A40003C0201110A000229008210253C020011DB
64850+:104A500000821025AF421000240200010A0002303B
64851+:104A6000AF82000C00821025AF421000AF80000C16
64852+:104A700000000000000000000000000003E000084B
64853+:104A8000000000008F82000C1040000400000000B5
64854+:104A90008F4210000441FFFE0000000003E0000808
64855+:104AA000000000008F8200102443F800000231C291
64856+:104AB00024C2FFF02C6303011060000300021042C7
64857+:104AC0000A000257AC8200008F85001800C5102B29
64858+:104AD0001440000B0000182100C5102324470001DA
64859+:104AE0008F82001C00A210212442FFFF0046102BE1
64860+:104AF000544000042402FFFF0A000257AC87000064
64861+:104B00002402FFFF0A000260AC8200008C820000D9
64862+:104B10000002194000621821000318800062182169
64863+:104B2000000318803C0208002442175C0062182130
64864+:104B300003E000080060102127BDFFD8AFBF0020B0
64865+:104B4000AFB1001CAFB000183C0460088C8250006C
64866+:104B50002403FF7F3C066000004310243442380CDD
64867+:104B6000AC8250008CC24C1C3C1A80000002160221
64868+:104B70003042000F10400007AF82001C8CC34C1C59
64869+:104B80003C02001F3442FC0000621824000319C2DA
64870+:104B9000AF8300188F420008275B400034420001B9
64871+:104BA000AF420008AF8000243C02601CAF40008090
64872+:104BB000AF4000848C4500088CC308083402800094
64873+:104BC000034220212402FFF0006218243C020080EE
64874+:104BD0003C010800AC2204203C025709AF84003895
64875+:104BE00014620004AF850034240200010A0002921E
64876+:104BF000AF820014AF8000148F42000038420001E1
64877+:104C0000304200011440FFFC8F8200141040001657
64878+:104C10000000000097420104104000058F8300004F
64879+:104C2000146000072462FFFF0A0002A72C62000A3A
64880+:104C30002C620010504000048F83000024620001A9
64881+:104C4000AF8200008F8300002C62000A1440000332
64882+:104C50002C6200070A0002AEAF80FFDC10400002A9
64883+:104C600024020001AF82FFDC8F4301088F44010062
64884+:104C700030622000AF83000410400008AF840010B1
64885+:104C80003C0208008C42042C244200013C01080034
64886+:104C9000AC22042C0A00058A3C0240003065020068
64887+:104CA00014A0000324020F001482026024020D00ED
64888+:104CB00097420104104002C83C02400030624000AC
64889+:104CC000144000AD8F8200388C4400088F42017878
64890+:104CD0000440FFFE24020800AF42017824020008CD
64891+:104CE000A7420140A7400142974201048F8400047B
64892+:104CF0003051FFFF30820001104000070220802168
64893+:104D00002623FFFE240200023070FFFFA742014667
64894+:104D10000A0002DBA7430148A74001463C02080005
64895+:104D20008C42043C1440000D8F8300103082002020
64896+:104D30001440000224030009240300010060202124
64897+:104D40008F830010240209005062000134840004A3
64898+:104D5000A744014A0A0002F60000000024020F00E6
64899+:104D60001462000530820020144000062403000D68
64900+:104D70000A0002F524030005144000022403000980
64901+:104D800024030001A743014A3C0208008C4204208E
64902+:104D90003C0400480E00020C004420250E000235A1
64903+:104DA000000000008F82000C1040003E0000000058
64904+:104DB0008F4210003C0300200043102410400039B3
64905+:104DC0008F820004304200021040003600000000D4
64906+:104DD000974210141440003300000000974210085E
64907+:104DE0008F8800383042FFFF2442000600021882FC
64908+:104DF0000003388000E83021304300018CC40000FB
64909+:104E000010600004304200030000000D0A00033768
64910+:104E100000E81021544000103084FFFF3C05FFFFE4
64911+:104E200000852024008518260003182B0004102B71
64912+:104E300000431024104000050000000000000000A6
64913+:104E40000000000D00000000240002228CC20000BF
64914+:104E50000A000336004520253883FFFF0003182B86
64915+:104E60000004102B00431024104000050000000037
64916+:104E7000000000000000000D000000002400022BD4
64917+:104E80008CC200003444FFFF00E81021AC44000055
64918+:104E90003C0208008C420430244200013C0108001E
64919+:104EA000AC2204308F6200008F840038AF8200088B
64920+:104EB0008C8300003402FFFF1462000F00001021F9
64921+:104EC0003C0508008CA504543C0408008C84045064
64922+:104ED00000B0282100B0302B008220210086202144
64923+:104EE0003C010800AC2504543C010800AC240450EB
64924+:104EF0000A000580240400088C8200003042010072
64925+:104F00001040000F000010213C0508008CA5044C47
64926+:104F10003C0408008C84044800B0282100B0302BE9
64927+:104F200000822021008620213C010800AC25044C91
64928+:104F30003C010800AC2404480A0005802404000851
64929+:104F40003C0508008CA504443C0408008C84044003
64930+:104F500000B0282100B0302B0082202100862021C3
64931+:104F60003C010800AC2504443C010800AC2404408A
64932+:104F70000A000580240400088F6200088F62000088
64933+:104F800000021602304300F02402003010620005D7
64934+:104F900024020040106200E08F8200200A00058891
64935+:104FA0002442000114A000050000000000000000E1
64936+:104FB0000000000D00000000240002568F4201781E
64937+:104FC0000440FFFE000000000E00023D27A4001078
64938+:104FD0001440000500408021000000000000000D8A
64939+:104FE000000000002400025D8E0200001040000559
64940+:104FF00000000000000000000000000D00000000A4
64941+:10500000240002608F62000C0443000324020001AC
64942+:105010000A00042EAE000000AE0200008F820038AD
64943+:105020008C480008A20000078F65000C8F64000404
64944+:1050300030A3FFFF0004240200852023308200FFFC
64945+:105040000043102124420005000230832CC200815D
64946+:10505000A605000A14400005A20400040000000098
64947+:105060000000000D00000000240002788F85003849
64948+:105070000E0005AB260400148F6200048F43010864
64949+:10508000A60200083C02100000621824106000080C
64950+:105090000000000097420104920300072442FFEC45
64951+:1050A000346300023045FFFF0A0003C3A203000778
64952+:1050B000974201042442FFF03045FFFF96060008A6
64953+:1050C0002CC200135440000592030007920200070F
64954+:1050D00034420001A20200079203000724020001EB
64955+:1050E00010620005240200031062000B8F8200385A
64956+:1050F0000A0003E030C6FFFF8F8200383C04FFFF48
64957+:105100008C43000C0064182400651825AC43000C87
64958+:105110000A0003E030C6FFFF3C04FFFF8C43001091
64959+:105120000064182400651825AC43001030C6FFFF4A
64960+:1051300024C2000200021083A20200058F830038FF
64961+:10514000304200FF00021080004328218CA800009C
64962+:105150008CA2000024030004000217021443001272
64963+:1051600000000000974201043C03FFFF01031824E4
64964+:105170003042FFFF004610232442FFFE006240251C
64965+:10518000ACA8000092030005306200FF000210800E
64966+:1051900000501021904200143042000F00431021B3
64967+:1051A0000A000415A20200068CA400049742010420
64968+:1051B0009603000A3088FFFF3042FFFF00461023AD
64969+:1051C0002442FFD60002140001024025ACA80004CE
64970+:1051D000920200079204000524630028000318834C
64971+:1051E0000064182134420004A2030006A202000752
64972+:1051F0008F8200042403FFFB34420002004310248A
64973+:10520000AF820004920300068F87003800031880E5
64974+:10521000007010218C4400203C02FFF63442FFFF56
64975+:105220000082402400671821AE04000CAC68000C1A
64976+:10523000920500063C03FF7F8E02000C00052880CB
64977+:1052400000B020213463FFFF01033024948800263E
64978+:1052500000A7282100431024AE02000CAC860020D9
64979+:10526000AC880024ACA8001024020010A742014022
64980+:1052700024020002A7400142A7400144A742014680
64981+:10528000974201043C0400082442FFFEA742014863
64982+:10529000240200010E00020CA742014A9603000AF4
64983+:1052A0009202000400431021244200023042000711
64984+:1052B00000021023304200070E000235AE0200103B
64985+:1052C0008F6200003C0308008C6304442404001037
64986+:1052D000AF820008974201043042FFFF2442FFFEE4
64987+:1052E00000403821000237C33C0208008C420440D1
64988+:1052F000006718210067282B004610210045102167
64989+:105300003C010800AC2304443C010800AC220440EA
64990+:105310000A0005150000000014A0000500000000B0
64991+:10532000000000000000000D000000002400030A3F
64992+:105330008F4201780440FFFE000000000E00023D95
64993+:1053400027A4001414400005004080210000000044
64994+:105350000000000D00000000240003118E02000078
64995+:105360005440000692020007000000000000000DFB
64996+:10537000000000002400031C9202000730420004D9
64997+:10538000104000058F8200042403FFFB344200021A
64998+:1053900000431024AF8200048F620004044300081D
64999+:1053A00092020007920200068E03000CAE0000007D
65000+:1053B0000002108000501021AC4300209202000730
65001+:1053C00030420004544000099602000A920200058F
65002+:1053D0003C03000100021080005010218C46001890
65003+:1053E00000C33021AC4600189602000A9206000461
65004+:1053F000277100080220202100C2302124C60005A8
65005+:10540000260500140E0005AB00063082920400064B
65006+:105410008F6500043C027FFF000420800091202162
65007+:105420008C8300043442FFFF00A228240065182169
65008+:10543000AC8300049202000792040005920300046A
65009+:10544000304200041040001496070008308400FF2A
65010+:1054500000042080009120218C86000497420104E2
65011+:105460009605000A306300FF3042FFFF0043102121
65012+:105470000045102130E3FFFF004310232442FFD8F2
65013+:1054800030C6FFFF0002140000C23025AC860004C5
65014+:105490000A0004C992030007308500FF0005288038
65015+:1054A00000B128218CA4000097420104306300FF62
65016+:1054B0003042FFFF00431021004710233C03FFFF51
65017+:1054C000008320243042FFFF00822025ACA400008E
65018+:1054D0009203000724020001106200060000000091
65019+:1054E0002402000310620011000000000A0004EC16
65020+:1054F0008E03001097420104920300049605000AEF
65021+:105500008E24000C00431021004510212442FFF29C
65022+:105510003C03FFFF008320243042FFFF0082202550
65023+:10552000AE24000C0A0004EC8E0300109742010424
65024+:10553000920300049605000A8E24001000431021F7
65025+:10554000004510212442FFEE3C03FFFF008320248E
65026+:105550003042FFFF00822025AE2400108E03001091
65027+:105560002402000AA7420140A74301429603000A11
65028+:10557000920200043C04004000431021A742014471
65029+:10558000A740014697420104A742014824020001B6
65030+:105590000E00020CA742014A0E0002350000000076
65031+:1055A0008F6200009203000400002021AF820008F7
65032+:1055B000974201049606000A3042FFFF006218215C
65033+:1055C000006028213C0308008C6304443C0208006E
65034+:1055D0008C42044000651821004410210065382BDE
65035+:1055E000004710213C010800AC2304443C010800A2
65036+:1055F000AC22044092040004008620212484000A86
65037+:105600003084FFFF0E0001E9000000009744010410
65038+:105610003084FFFF0E0001F7000000003C02100084
65039+:10562000AF4201780A0005878F820020148200278C
65040+:105630003062000697420104104000673C024000BF
65041+:105640003062400010400005000000000000000033
65042+:105650000000000D00000000240004208F420178AB
65043+:105660000440FFFE24020800AF4201782402000833
65044+:10567000A7420140A74001428F82000497430104E2
65045+:1056800030420001104000073070FFFF2603FFFE8C
65046+:1056900024020002A7420146A74301480A00053F31
65047+:1056A0002402000DA74001462402000DA742014A32
65048+:1056B0008F62000024040008AF8200080E0001E998
65049+:1056C000000000000A0005190200202110400042DD
65050+:1056D0003C02400093620000304300F024020010BE
65051+:1056E0001062000524020070106200358F820020D5
65052+:1056F0000A000588244200018F62000097430104DC
65053+:105700003050FFFF3071FFFF8F4201780440FFFEF1
65054+:105710003202000700021023304200072403000A6F
65055+:105720002604FFFEA7430140A7420142A7440144CB
65056+:10573000A7400146A75101488F420108304200208E
65057+:10574000144000022403000924030001A743014A76
65058+:105750000E00020C3C0400400E0002350000000068
65059+:105760003C0708008CE70444021110212442FFFE8C
65060+:105770003C0608008CC604400040182100E3382194
65061+:10578000000010218F65000000E3402B00C2302193
65062+:105790002604000800C830213084FFFFAF850008D0
65063+:1057A0003C010800AC2704443C010800AC2604403E
65064+:1057B0000E0001E9000000000A0005190220202166
65065+:1057C0000E00013B000000008F82002024420001F7
65066+:1057D000AF8200203C024000AF4201380A00029232
65067+:1057E000000000003084FFFF30C6FFFF00052C00E2
65068+:1057F00000A628253882FFFF004510210045282BF0
65069+:105800000045102100021C023042FFFF004310211E
65070+:1058100000021C023042FFFF004310213842FFFF0C
65071+:1058200003E000083042FFFF3084FFFF30A5FFFF98
65072+:1058300000001821108000070000000030820001E5
65073+:105840001040000200042042006518210A0005A152
65074+:105850000005284003E000080060102110C0000689
65075+:1058600024C6FFFF8CA2000024A50004AC82000027
65076+:105870000A0005AB2484000403E0000800000000D7
65077+:1058800010A0000824A3FFFFAC8600000000000069
65078+:10589000000000002402FFFF2463FFFF1462FFFAF0
65079+:1058A0002484000403E00008000000000000000160
65080+:1058B0000A00002A00000000000000000000000DA7
65081+:1058C000747870362E322E3162000000060201001C
65082+:1058D00000000000000001360000EA600000000047
65083+:1058E00000000000000000000000000000000000B8
65084+:1058F00000000000000000000000000000000000A8
65085+:105900000000000000000000000000000000000097
65086+:105910000000001600000000000000000000000071
65087+:105920000000000000000000000000000000000077
65088+:105930000000000000000000000000000000000067
65089+:1059400000000000000000000000138800000000BC
65090+:10595000000005DC00000000000000001000000353
65091+:10596000000000000000000D0000000D3C020800D7
65092+:1059700024423D683C0308002463401CAC40000006
65093+:105980000043202B1480FFFD244200043C1D08002E
65094+:1059900037BD7FFC03A0F0213C100800261000A8B2
65095+:1059A0003C1C0800279C3D680E00044E00000000CF
65096+:1059B0000000000D27BDFFB4AFA10000AFA200049E
65097+:1059C000AFA30008AFA4000CAFA50010AFA6001451
65098+:1059D000AFA70018AFA8001CAFA90020AFAA0024F1
65099+:1059E000AFAB0028AFAC002CAFAD0030AFAE003491
65100+:1059F000AFAF0038AFB8003CAFB90040AFBC004417
65101+:105A0000AFBF00480E000591000000008FBF0048A6
65102+:105A10008FBC00448FB900408FB8003C8FAF003876
65103+:105A20008FAE00348FAD00308FAC002C8FAB0028D0
65104+:105A30008FAA00248FA900208FA8001C8FA7001810
65105+:105A40008FA600148FA500108FA4000C8FA3000850
65106+:105A50008FA200048FA1000027BD004C3C1B6004F6
65107+:105A60008F7A5030377B502803400008AF7A00000F
65108+:105A70008F86003C3C0390003C0280000086282575
65109+:105A800000A32025AC4400203C0380008C6700204C
65110+:105A900004E0FFFE0000000003E00008000000003A
65111+:105AA0000A000070240400018F85003C3C04800043
65112+:105AB0003483000100A3102503E00008AC8200201D
65113+:105AC00003E00008000010213084FFFF30A5FFFF35
65114+:105AD00010800007000018213082000110400002F1
65115+:105AE00000042042006518211480FFFB00052840B7
65116+:105AF00003E000080060102110C000070000000053
65117+:105B00008CA2000024C6FFFF24A50004AC82000084
65118+:105B100014C0FFFB2484000403E000080000000020
65119+:105B200010A0000824A3FFFFAC86000000000000C6
65120+:105B3000000000002402FFFF2463FFFF1462FFFA4D
65121+:105B40002484000403E000080000000090AA003153
65122+:105B50008FAB00108CAC00403C0300FF8D6800044C
65123+:105B6000AD6C00208CAD004400E060213462FFFF8A
65124+:105B7000AD6D00248CA700483C09FF000109C0243A
65125+:105B8000AD6700288CAE004C0182C824031978252B
65126+:105B9000AD6F0004AD6E002C8CAD0038314A00FFB3
65127+:105BA000AD6D001C94A900323128FFFFAD680010D4
65128+:105BB00090A70030A5600002A1600004A16700006A
65129+:105BC00090A30032306200FF0002198210600005CD
65130+:105BD000240500011065000E0000000003E000082D
65131+:105BE000A16A00018CD80028354A0080AD780018E1
65132+:105BF0008CCF0014AD6F00148CCE0030AD6E000859
65133+:105C00008CC4002CA16A000103E00008AD64000C04
65134+:105C10008CCD001CAD6D00188CC90014AD6900144A
65135+:105C20008CC80024AD6800088CC70020AD67000C4C
65136+:105C30008CC200148C8300700043C82B1320000713
65137+:105C4000000000008CC20014144CFFE400000000AF
65138+:105C5000354A008003E00008A16A00018C820070D0
65139+:105C60000A0000E6000000009089003027BDFFF820
65140+:105C70008FA8001CA3A900008FA300003C0DFF808B
65141+:105C800035A2FFFF8CAC002C00625824AFAB0000A3
65142+:105C9000A100000400C05821A7A000028D06000446
65143+:105CA00000A048210167C8218FA500000080502175
65144+:105CB0003C18FF7F032C20263C0E00FF2C8C00019B
65145+:105CC000370FFFFF35CDFFFF3C02FF0000AFC824B8
65146+:105CD00000EDC02400C27824000C1DC003236825F9
65147+:105CE00001F87025AD0D0000AD0E00048D240024D8
65148+:105CF000AFAD0000AD0400088D2C00202404FFFF90
65149+:105D0000AD0C000C9547003230E6FFFFAD060010E9
65150+:105D10009145004830A200FF000219C25060000106
65151+:105D20008D240034AD0400148D4700388FAA00186C
65152+:105D300027BD0008AD0B0028AD0A0024AD07001CEC
65153+:105D4000AD00002CAD00001803E00008AD000020FD
65154+:105D500027BDFFE0AFB20018AFB10014AFB0001024
65155+:105D6000AFBF001C9098003000C088213C0D00FFA0
65156+:105D7000330F007FA0CF0000908E003135ACFFFFC5
65157+:105D80003C0AFF00A0CE000194A6001EA220000441
65158+:105D90008CAB00148E29000400A08021016C282403
65159+:105DA000012A40240080902101052025A62600021A
65160+:105DB000AE24000426050020262400080E000092D0
65161+:105DC00024060002924700302605002826240014ED
65162+:105DD00000071E000003160324060004044000030D
65163+:105DE0002403FFFF965900323323FFFF0E00009279
65164+:105DF000AE230010262400248FBF001C8FB2001891
65165+:105E00008FB100148FB00010240500030000302172
65166+:105E10000A00009C27BD002027BDFFD8AFB1001CA1
65167+:105E2000AFB00018AFBF002090A9003024020001DD
65168+:105E300000E050213123003F00A040218FB00040FE
65169+:105E40000080882100C04821106200148FA700380C
65170+:105E5000240B000500A0202100C02821106B001396
65171+:105E6000020030210E000128000000009225007C75
65172+:105E700030A400021080000326030030AE00003082
65173+:105E8000260300348FBF00208FB1001C8FB0001894
65174+:105E90000060102103E0000827BD00280E0000A7C5
65175+:105EA000AFB000100A00016F000000008FA3003C9B
65176+:105EB000010020210120282101403021AFA3001042
65177+:105EC0000E0000EEAFB000140A00016F00000000E9
65178+:105ED0003C06800034C20E008C4400108F850044C4
65179+:105EE000ACA400208C43001803E00008ACA30024FD
65180+:105EF0003C06800034C20E008C4400148F850044A0
65181+:105F0000ACA400208C43001C03E00008ACA30024D8
65182+:105F10009382000C1040001B2483000F2404FFF028
65183+:105F20000064382410E00019978B00109784000E4D
65184+:105F30009389000D3C0A601C0A0001AC01644023F7
65185+:105F400001037021006428231126000231C2FFFFE3
65186+:105F500030A2FFFF0047302B50C0000E00E4482164
65187+:105F60008D4D000C31A3FFFF00036400000C2C03D7
65188+:105F700004A1FFF30000302130637FFF0A0001A479
65189+:105F80002406000103E00008000000009784000ED2
65190+:105F900000E448213123FFFF3168FFFF0068382B00
65191+:105FA00054E0FFF8A783000E938A000D114000050E
65192+:105FB000240F0001006BC023A380000D03E0000844
65193+:105FC000A798000E006BC023A38F000D03E000080C
65194+:105FD000A798000E03E000080000000027BDFFE8BE
65195+:105FE000AFB000103C10800036030140308BFFFF43
65196+:105FF00093AA002BAFBF0014A46B000436040E005C
65197+:106000009488001630C600FF8FA90030A4680006EF
65198+:10601000AC650008A0660012A46A001AAC670020F4
65199+:106020008FA5002CA4690018012020210E000198E2
65200+:10603000AC6500143C021000AE0201788FBF001462
65201+:106040008FB0001003E0000827BD00188F85000006
65202+:106050002484000727BDFFF83084FFF83C06800049
65203+:1060600094CB008A316AFFFFAFAA00008FA900001D
65204+:10607000012540232507FFFF30E31FFF0064102B9D
65205+:106080001440FFF700056882000D288034CC4000E2
65206+:1060900000AC102103E0000827BD00088F8200003B
65207+:1060A0002486000730C5FFF800A2182130641FFFC6
65208+:1060B00003E00008AF8400008F87003C8F84004419
65209+:1060C00027BDFFB0AFB70044AFB40038AFB1002C6C
65210+:1060D000AFBF0048AFB60040AFB5003CAFB300342F
65211+:1060E000AFB20030AFB000283C0B80008C8600249B
65212+:1060F000AD6700808C8A002035670E00356901008D
65213+:10610000ACEA00108C8800248D2500040000B82122
65214+:10611000ACE800188CE3001000A688230000A02142
65215+:10612000ACE300148CE20018ACE2001C122000FE6C
65216+:1061300000E0B021936C0008118000F40000000022
65217+:10614000976F001031EEFFFF022E682B15A000EFB5
65218+:1061500000000000977200103250FFFFAED0000028
65219+:106160003C0380008C740000329300081260FFFD35
65220+:106170000000000096D800088EC700043305FFFF1A
65221+:1061800030B5000112A000E4000000000000000D86
65222+:1061900030BFA0402419004013F9011B30B4A00007
65223+:1061A000128000DF000000009373000812600008F6
65224+:1061B00000000000976D001031ACFFFF00EC202BB9
65225+:1061C0001080000330AE004011C000D50000000078
65226+:1061D000A7850040AF87003893630008022028217C
65227+:1061E000AFB10020146000F527B40020AF60000CB0
65228+:1061F000978F004031F14000162000022403001662
65229+:106200002403000E24054007A363000AAF650014B1
65230+:10621000938A00428F70001431550001001512401E
65231+:1062200002024825AF690014979F00408F78001440
65232+:1062300033F9001003194025AF680014979200400D
65233+:106240003247000810E0016E000000008F67001464
65234+:106250003C1210003C11800000F27825AF6F001452
65235+:1062600036230E00946E000A3C0D81002406000EB9
65236+:1062700031CCFFFF018D2025AF640004A36600022E
65237+:106280009373000A3406FFFC266B0004A36B000A1C
65238+:1062900097980040330820001100015F00000000C3
65239+:1062A0003C05800034A90E00979900409538000CF9
65240+:1062B00097870040001940423312C00031030003A9
65241+:1062C00000127B0330F11000006F6825001172038B
65242+:1062D00001AE6025000C20C0A76400129793004017
65243+:1062E000936A000A001359823175003C02AA1021FA
65244+:1062F0002450003CA3700009953F000C33F93FFF88
65245+:10630000A779001097700012936900090130F821F5
65246+:1063100027E5000230B900070019C0233308000741
65247+:10632000A368000B9371000997720012976F001019
65248+:10633000322700FF8F910038978D004000F218211E
65249+:10634000006F702101C6602131A6004010C0000519
65250+:106350003185FFFF00B1102B3C1280001040001768
65251+:10636000000098210225A82B56A0013E8FA50020F1
65252+:106370003C048000348A0E008D5300143C068000DB
65253+:10638000AD5300108D4B001CAD4B0018AD45000007
65254+:106390008CCD000031AC00081180FFFD34CE0E0022
65255+:1063A00095C3000800A0882100009021A783004029
65256+:1063B0008DC6000424130001AF860038976F0010CB
65257+:1063C00031F5FFFF8E9F000003F1282310A0011F6D
65258+:1063D000AE85000093620008144000DD000000005C
65259+:1063E0000E0001E7240400108F900048004028218F
65260+:1063F0003C023200320600FF000654000142F8253C
65261+:1064000026090001AF890048ACBF0000937900095C
65262+:1064100097780012936F000A332800FF3303FFFFC1
65263+:106420000103382100076C0031EE00FF01AE60254A
65264+:10643000ACAC00048F840048978B0040316A200088
65265+:106440001140010AACA4000897640012308BFFFFD2
65266+:1064500006400108ACAB000C978E004031C5000827
65267+:1064600014A0000226280006262800023C1F8000F7
65268+:1064700037E70E0094F900148CE5001C8F670004C8
65269+:10648000937800023324FFFF330300FFAFA3001013
65270+:106490008F6F0014AFA800180E0001CBAFAF00142F
65271+:1064A000240400100E0001FB000000008E9200008A
65272+:1064B00016400005000000008F7800142403FFBF81
65273+:1064C0000303A024AF7400148F67000C00F5C821EB
65274+:1064D000AF79000C9375000816A0000800000000BA
65275+:1064E00012600006000000008F6800143C0AEFFFF5
65276+:1064F0003549FFFE0109F824AF7F0014A37300089B
65277+:106500008FA500200A00034F02202021AED10000F9
65278+:106510000A00022D3C03800014E0FF1E30BFA040A3
65279+:106520000E0001900000A0212E9100010237B0253D
65280+:1065300012C000188FBF00488F87003C24170F003F
65281+:1065400010F700D43C0680008CD901780720FFFEAC
65282+:10655000241F0F0010FF00F634CA0E008D560014E1
65283+:1065600034C7014024080240ACF600048D49001CE9
65284+:106570003C141000ACE90008A0E00012A4E0001AEE
65285+:10658000ACE00020A4E00018ACE80014ACD4017822
65286+:106590008FBF00488FB700448FB600408FB5003CD6
65287+:1065A0008FB400388FB300348FB200308FB1002C1D
65288+:1065B0008FB0002803E0000827BD00508F910038FD
65289+:1065C000978800403C1280000220A821310700403B
65290+:1065D00014E0FF7C00009821977900108F9200381A
65291+:1065E0003338FFFF131200A8000020210080A021F3
65292+:1065F000108000F300A088211620FECE00000000CD
65293+:106600000A00031F2E9100013C0380008C62017878
65294+:106610000440FFFE240808008F860000AC68017863
65295+:106620003C038000946D008A31ACFFFF0186582343
65296+:10663000256AFFFF31441FFF2C8900081520FFF950
65297+:10664000000000008F8F0048347040008F83003CB2
65298+:1066500000E0A021240E0F0025E70001AF870048CD
65299+:1066600000D03021023488233C08800031F500FF3F
65300+:10667000106E0005240700019398004233130001B7
65301+:106680000013924036470001001524003C0A010027
65302+:10669000008A4825ACC900008F82004830BF003610
65303+:1066A00030B90008ACC200041320009900FF9825FF
65304+:1066B00035120E009650000A8F8700003C0F8100B3
65305+:1066C0003203FFFF24ED000835060140006F60250E
65306+:1066D0003C0E100031AB1FFF269200062405000E71
65307+:1066E000ACCC0020026E9825A4C5001AAF8B000028
65308+:1066F000A4D20018162000083C1080008F89003CAE
65309+:1067000024020F00512200022417000136730040BA
65310+:106710000E0001883C10800036060E008CCB001461
65311+:10672000360A014002402021AD4B00048CC5001CFC
65312+:10673000AD450008A1550012AD5300140E0001989C
65313+:106740003C151000AE1501780A000352000000004D
65314+:10675000936F0009976E0012936D000B31E500FFF7
65315+:1067600000AE202131AC00FF008C80212602000AFF
65316+:106770003050FFFF0E0001E7020020218F86004805
65317+:106780003C0341003C05800024CB0001AF8B004856
65318+:10679000936A00099769001230C600FF315F00FF5D
65319+:1067A0003128FFFF03E8382124F900020006C40065
65320+:1067B0000319782501E37025AC4E00008F6D000CA5
65321+:1067C00034A40E00948B001401B26025AC4C00047C
65322+:1067D0008C85001C8F670004936A00023164FFFF00
65323+:1067E000314900FFAFA900108F680014AFB1001845
65324+:1067F0000E0001CBAFA800140A0002FD0200202108
65325+:10680000AF600004A36000029798004033082000A6
65326+:106810001500FEA300003021A760001297840040FD
65327+:10682000936B000A3C10800030931F0000135183CB
65328+:10683000014BA82126A20028A362000936090E00F8
65329+:10684000953F000C0A000295A77F00108F7000147E
65330+:10685000360900400E000188AF6900140A0002C921
65331+:10686000000000000A00034F000020210641FEFA4C
65332+:10687000ACA0000C8CAC000C3C0D8000018D902570
65333+:106880000A0002EAACB2000C000090210A0002C526
65334+:1068900024130001128000073C028000344B0E00DC
65335+:1068A0009566000830D300401260004900000000E7
65336+:1068B0003C0680008CD001780600FFFE34C50E0037
65337+:1068C00094B500103C03050034CC014032B8FFFF02
65338+:1068D00003039025AD92000C8CAF0014240D200012
65339+:1068E0003C041000AD8F00048CAE001CAD8E00087F
65340+:1068F000A1800012A580001AAD800020A58000189C
65341+:10690000AD8D0014ACC401780A0003263C0680005B
65342+:106910008F9F0000351801402692000227F90008D9
65343+:1069200033281FFFA71200180A000391AF88000048
65344+:106930003C02800034450140ACA0000C1280001BDA
65345+:1069400034530E0034510E008E370010ACB70004E3
65346+:106950008E2400183C0B8000ACA400083570014068
65347+:1069600024040040A20000128FBF0048A600001AB5
65348+:106970008FB70044AE0000208FB60040A60000187C
65349+:106980008FB5003CAE0400148FB400388FB30034D0
65350+:106990008FB200308FB1002C8FB000283C02100065
65351+:1069A00027BD005003E00008AD6201788E66001438
65352+:1069B000ACA600048E64001C0A00042A3C0B800074
65353+:1069C0000E0001902E9100010A0003200237B0252D
65354+:1069D000000000000000000D00000000240003691A
65355+:1069E0000A0004013C06800027BDFFD8AFBF00208D
65356+:1069F0003C0980003C1F20FFAFB200183C0760003C
65357+:106A000035320E002402001037F9FFFDACE23008E9
65358+:106A1000AFB3001CAFB10014AFB00010AE5900000E
65359+:106A20000000000000000000000000000000000066
65360+:106A3000000000003C1800FF3713FFFDAE530000BC
65361+:106A40003C0B60048D7050002411FF7F3C0E00024F
65362+:106A50000211782435EC380C35CD0109ACED4C1819
65363+:106A6000240A0009AD6C50008CE80438AD2A0008F7
65364+:106A7000AD2000148CE54C1C3106FFFF38C42F718B
65365+:106A800000051E023062000F2486C0B310400007CC
65366+:106A9000AF8200088CE54C1C3C09001F3528FC0027
65367+:106AA00000A81824000321C2AF8400048CF1080858
65368+:106AB0003C0F57092412F0000232702435F0001008
65369+:106AC00001D0602601CF68262DAA00012D8B000180
65370+:106AD000014B382550E00009A380000C3C1F601CCE
65371+:106AE0008FF8000824190001A399000C33137C00CF
65372+:106AF000A7930010A780000EA380000DAF80004870
65373+:106B000014C00003AF8000003C066000ACC0442C01
65374+:106B10000E0005B93C1080000E000F1A361101005E
65375+:106B20003C12080026523DD03C13080026733E500C
65376+:106B30008E03000038640001308200011440FFFC25
65377+:106B40003C0B800A8E2600002407FF8024C90240E7
65378+:106B5000312A007F014B402101272824AE06002066
65379+:106B6000AF880044AE0500243C048000AF86003CA2
65380+:106B70008C8C01780580FFFE24180800922F0008F5
65381+:106B8000AC980178A38F0042938E004231CD000172
65382+:106B900011A0000F24050D0024DFF8002FF90301D8
65383+:106BA0001320001C000629C224A4FFF00004104298
65384+:106BB000000231400E00020200D2D8213C02400007
65385+:106BC0003C068000ACC201380A0004A000000000AE
65386+:106BD00010C50023240D0F0010CD00273C1F800896
65387+:106BE00037F9008093380000240E0050330F00FF67
65388+:106BF00015EEFFF33C0240000E000A3600000000D4
65389+:106C00003C0240003C068000ACC201380A0004A0EF
65390+:106C1000000000008F83000400A3402B1500000B30
65391+:106C20008F8B0008006B50212547FFFF00E5482BA4
65392+:106C30001520000600A36023000C19400E0002027C
65393+:106C40000073D8210A0004C43C0240000000000D7B
65394+:106C50000E000202000000000A0004C43C024000D2
65395+:106C60003C1B0800277B3F500E0002020000000082
65396+:106C70000A0004C43C0240003C1B0800277B3F7014
65397+:106C80000E000202000000000A0004C43C024000A2
65398+:106C90003C0660043C09080025290104ACC9502CBD
65399+:106CA0008CC850003C0580003C0200023507008083
65400+:106CB000ACC750003C040800248415A43C03080021
65401+:106CC0002463155CACA50008ACA2000C3C010800D4
65402+:106CD000AC243D603C010800AC233D6403E00008A7
65403+:106CE0002402000100A030213C1C0800279C3D68C4
65404+:106CF0003C0C04003C0B0002008B3826008C402624
65405+:106D00002CE200010007502B2D050001000A4880ED
65406+:106D10003C03080024633D60004520250123182121
65407+:106D20001080000300001021AC6600002402000166
65408+:106D300003E00008000000003C1C0800279C3D68A0
65409+:106D40003C0B04003C0A0002008A3026008B3826E7
65410+:106D50002CC200010006482B2CE5000100094080F0
65411+:106D60003C03080024633D600045202501031821F1
65412+:106D700010800005000010213C0C0800258C155CDB
65413+:106D8000AC6C00002402000103E0000800000000D9
65414+:106D90003C0900023C08040000883026008938269F
65415+:106DA0002CC30001008028212CE400010083102561
65416+:106DB0001040000B000030213C1C0800279C3D685F
65417+:106DC0003C0A80008D4E00082406000101CA682597
65418+:106DD000AD4D00088D4C000C01855825AD4B000CC5
65419+:106DE00003E0000800C010213C1C0800279C3D68FF
65420+:106DF0003C0580008CA6000C000420272402000122
65421+:106E000000C4182403E00008ACA3000C3C020002FC
65422+:106E10001082000B3C0560003C0704001087000353
65423+:106E20000000000003E00008000000008CA908D06A
65424+:106E3000240AFFFD012A402403E00008ACA808D082
65425+:106E40008CA408D02406FFFE0086182403E0000866
65426+:106E5000ACA308D03C05601A34A600108CC3008097
65427+:106E600027BDFFF88CC50084AFA3000093A40000E9
65428+:106E70002402000110820003AFA5000403E0000813
65429+:106E800027BD000893A7000114E0001497AC00028E
65430+:106E900097B800023C0F8000330EFFFC01CF682141
65431+:106EA000ADA50000A3A000003C0660008CC708D080
65432+:106EB0002408FFFE3C04601A00E82824ACC508D072
65433+:106EC0008FA300048FA200003499001027BD000892
65434+:106ED000AF22008003E00008AF2300843C0B800059
65435+:106EE000318AFFFC014B48218D2800000A00057DF6
65436+:106EF000AFA8000427BDFFE8AFBF00103C1C08008E
65437+:106F0000279C3D683C0580008CA4000C8CA20004EA
65438+:106F10003C0300020044282410A0000A00A3182407
65439+:106F20003C0604003C0400021460000900A6102482
65440+:106F30001440000F3C0404000000000D3C1C08003D
65441+:106F4000279C3D688FBF001003E0000827BD001894
65442+:106F50003C0208008C423D600040F809000000003F
65443+:106F60003C1C0800279C3D680A0005A68FBF001046
65444+:106F70003C0208008C423D640040F809000000001B
65445+:106F80000A0005AC00000000000411C003E0000886
65446+:106F9000244202403C04080024843FB42405001A23
65447+:106FA0000A00009C0000302127BDFFE0AFB00010B8
65448+:106FB0003C108000AFBF0018AFB1001436110100C3
65449+:106FC000922200090E0005B63044007F8E3F00007B
65450+:106FD0008F89003C3C0F008003E26021258800403F
65451+:106FE0000049F821240DFF80310E00783198007897
65452+:106FF00035F9000135F100020319382501D1482582
65453+:10700000010D302403ED5824018D2824240A00406A
65454+:1070100024040080240300C0AE0B0024AE0008103E
65455+:10702000AE0A0814AE040818AE03081CAE05080426
65456+:10703000AE070820AE060808AE0908243609090084
65457+:107040009539000C3605098033ED007F3338FFFF9A
65458+:10705000001889C0AE110800AE0F0828952C000C4E
65459+:107060008FBF00188FB10014318BFFFF000B51C090
65460+:10707000AE0A002C8CA400508FB000108CA3003CF2
65461+:107080008D2700048CA8001C8CA600383C0E800ABA
65462+:1070900001AE102127BD0020AF820044AF84005014
65463+:1070A000AF830054AF87004CAF88005C03E000085A
65464+:1070B000AF8600603C09080091293FD924A800024E
65465+:1070C0003C05110000093C0000E8302500C51825EA
65466+:1070D00024820008AC83000003E00008AC800004B8
65467+:1070E0003C098000352309009128010B906A0011AA
65468+:1070F0002402002800804821314700FF00A07021B1
65469+:1071000000C068213108004010E20002340C86DD26
65470+:10711000240C08003C0A800035420A9A944700007B
65471+:10712000354B0A9C35460AA030F9FFFFAD39000007
65472+:107130008D780000354B0A8024040001AD3800042E
65473+:107140008CCF0000AD2F00089165001930A300031B
65474+:107150001064009028640002148000AF240500022F
65475+:107160001065009E240F0003106F00B435450AA47B
65476+:10717000240A0800118A0048000000005100003D68
65477+:107180003C0B80003C0480003483090090670012AF
65478+:1071900030E200FF004D7821000FC8802724000155
65479+:1071A0003C0A8000354F090091E50019354C0980F3
65480+:1071B0008D87002830A300FF0003150000475825E5
65481+:1071C0000004C4003C19600001793025370806FF2F
65482+:1071D000AD260000AD2800048DEA002C25280028EB
65483+:1071E000AD2A00088DEC0030AD2C000C8DE500348C
65484+:1071F000AD2500108DE400383C05800034AC093C1E
65485+:10720000AD2400148DE3001CAD2300188DE7002091
65486+:10721000AD27001C8DE20024AD2200208DF900284E
65487+:1072200034A20100AD3900248D830000AD0E0004AE
65488+:1072300034B90900AD0300008C47000C250200148E
65489+:10724000AD070008932B00123C04080090843FD83F
65490+:10725000AD000010317800FF030D302100064F0013
65491+:1072600000047C00012F702535CDFFFC03E00008F1
65492+:10727000AD0D000C35780900930600123C0508009E
65493+:1072800094A53FC830C800FF010D5021000A60805E
65494+:107290000A00063C018520211500005B000000006B
65495+:1072A0003C08080095083FCE3C06080094C63FC83D
65496+:1072B000010610213C0B800035790900933800113C
65497+:1072C000932A001935660A80330800FF94CF002AFC
65498+:1072D00000086082314500FF978A0058000C1E00AC
65499+:1072E000000524003047FFFF006410250047C0253B
65500+:1072F00001EA30213C0B4000030B402500066400EE
65501+:10730000AD280000AD2C0004932500183C030006B6
65502+:107310002528001400053E0000E31025AD220008DA
65503+:107320008F24002C3C05800034AC093CAD24000CBB
65504+:107330008F38001C34A20100254F0001AD38001029
65505+:107340008D830000AD0E000431EB7FFFAD03000024
65506+:107350008C47000C34B90900A78B0058AD07000812
65507+:10736000932B00123C04080090843FD8250200149F
65508+:10737000317800FF030D302100064F0000047C002F
65509+:10738000012F702535CDFFFCAD00001003E0000893
65510+:10739000AD0D000C3C02080094423FD23C050800B1
65511+:1073A00094A53FC835440AA43C07080094E73FC4AD
65512+:1073B000948B00000045C8210327C023000B1C004C
65513+:1073C0002706FFF200665025AD2A000CAD20001004
65514+:1073D000AD2C00140A00063025290018354F0AA4E8
65515+:1073E00095E50000956400280005140000043C00A9
65516+:1073F0003459810000EC5825AD39000CAD2B00103C
65517+:107400000A000630252900143C0C0800958C3FCE5C
65518+:107410000A000681258200015460FF56240A0800F4
65519+:1074200035580AA49706000000061C00006C502581
65520+:10743000AD2A000C0A000630252900103C03080084
65521+:1074400094633FD23C07080094E73FC83C0F080014
65522+:1074500095EF3FC494A4000095790028006710219F
65523+:10746000004F582300041C00001934002578FFEE5B
65524+:1074700000D87825346A8100AD2A000CAD2F0010A9
65525+:10748000AD200014AD2C00180A0006302529001C80
65526+:1074900003E00008240207D027BDFFE0AFB20018C8
65527+:1074A000AFB10014AFB00010AFBF001C0E00007CE5
65528+:1074B000008088218F8800548F87004C3C0580080D
65529+:1074C00034B20080011128213C1080002402008089
65530+:1074D000240300C000A72023AE0208183C06800841
65531+:1074E000AE03081C18800004AF850054ACC500042E
65532+:1074F0008CC90004AF89004C1220000936040980B1
65533+:107500000E0006F800000000924C00278E0B00745D
65534+:1075100001825004014B3021AE46000C3604098034
65535+:107520008C8E001C8F8F005C01CF682319A0000493
65536+:107530008FBF001C8C90001CAF90005C8FBF001CA4
65537+:107540008FB200188FB100148FB000100A00007EB7
65538+:1075500027BD00208F8600508F8300548F82004CFF
65539+:107560003C05800834A40080AC860050AC83003C0D
65540+:1075700003E00008ACA200043C0308008C63005444
65541+:1075800027BDFFF8308400FF2462000130A500FF12
65542+:107590003C010800AC22005430C600FF3C078000CC
65543+:1075A0008CE801780500FFFE3C0C7FFFA3A40003DC
65544+:1075B0008FAA0000358BFFFF014B4824000627C02F
65545+:1075C00001244025AFA8000034E201009043000AE6
65546+:1075D000A3A000023C1980FFA3A300018FAF00000D
65547+:1075E00030AE007F3738FFFF01F86024000E6E00D8
65548+:1075F0003C0A002034E50140018D58253549200022
65549+:107600002406FF803C04100027BD0008ACAB000C32
65550+:10761000ACA90014A4A00018A0A6001203E0000862
65551+:10762000ACE40178308800FF30A700FF3C03800005
65552+:107630008C6201780440FFFE3C0C8000358A0A0011
65553+:107640008D4B00203584014035850980AC8B0004CA
65554+:107650008D4900240007302B00061540AC89000836
65555+:10766000A088001090A3004CA083002D03E0000828
65556+:10767000A480001827BDFFE8308400FFAFBF0010D2
65557+:107680000E00075D30A500FF8F8300548FBF0010F0
65558+:107690003C06800034C50140344700402404FF907C
65559+:1076A0003C02100027BD0018ACA3000CA0A40012DF
65560+:1076B000ACA7001403E00008ACC2017827BDFFE0CE
65561+:1076C0003C088008AFBF001CAFB20018AFB1001477
65562+:1076D000AFB00010351000808E0600183C07800007
65563+:1076E000309200FF00C72025AE0400180E00007C79
65564+:1076F00030B100FF92030005346200080E00007EE6
65565+:10770000A2020005024020210E000771022028215C
65566+:10771000024020218FBF001C8FB200188FB10014CF
65567+:107720008FB0001024050005240600010A0007326E
65568+:1077300027BD00203C05800034A309809066000826
65569+:1077400030C200081040000F3C0A01013549080A08
65570+:10775000AC8900008CA80074AC8800043C070800C9
65571+:1077600090E73FD830E5001050A00008AC8000083A
65572+:107770003C0D800835AC00808D8B0058AC8B000828
65573+:107780002484000C03E00008008010210A0007B5E3
65574+:107790002484000C27BDFFE83C098000AFB0001036
65575+:1077A000AFBF00143526098090C8000924020006E6
65576+:1077B00000A05821310300FF3527090000808021F7
65577+:1077C000240500041062007B2408000294CF005CB2
65578+:1077D0003C0E020431EDFFFF01AE6025AE0C00004F
65579+:1077E00090CA00083144002010800008000000000A
65580+:1077F00090C2004E3C1F010337F90300305800FFD0
65581+:107800000319302524050008AE06000490F9001184
65582+:1078100090E6001290E40011333800FF00187082E7
65583+:1078200030CF00FF01CF5021014B6821308900FF8C
65584+:1078300031AAFFFF39230028000A60801460002C61
65585+:10784000020C482390E400123C198000372F0100FD
65586+:10785000308C00FF018B1821000310800045F821B7
65587+:10786000001F8400360706FFAD270004373F0900DC
65588+:1078700093EC001193EE0012372609800005C082B8
65589+:107880008DE4000C8CC5003431CD00FF01AB10211C
65590+:107890000058182100A4F8230008840000033F00CA
65591+:1078A00000F0302533F9FFFF318F00FC00D970253F
65592+:1078B0000158202101E9682100045080ADAE000C80
65593+:1078C0000E00007C012A80213C088008240B000463
65594+:1078D000350500800E00007EA0AB000902001021DB
65595+:1078E0008FBF00148FB0001003E0000827BD001800
65596+:1078F00090EC001190E300193C18080097183FCE57
65597+:10790000318200FF0002F882307000FF001FCE00BD
65598+:1079100000103C000327302500D870253C0F4000A4
65599+:1079200001CF68253C198000AD2D0000373F0900CC
65600+:1079300093EC001193EE0012372F010037260980D7
65601+:107940000005C0828DE4000C8CC5003431CD00FFF1
65602+:1079500001AB10210058182100A4F823000884006E
65603+:1079600000033F0000F0302533F9FFFF318F00FCAA
65604+:1079700000D970250158202101E9682100045080B8
65605+:10798000ADAE000C0E00007C012A80213C0880086E
65606+:10799000240B0004350500800E00007EA0AB00091A
65607+:1079A000020010218FBF00148FB0001003E0000808
65608+:1079B00027BD00180A0007C72408001227BDFFD002
65609+:1079C0003C038000AFB60028AFB50024AFB4002060
65610+:1079D000AFB10014AFBF002CAFB3001CAFB20018A2
65611+:1079E000AFB000103467010090E6000B309400FF48
65612+:1079F00030B500FF30C200300000B02110400099C7
65613+:107A000000008821346409809088000800082E0056
65614+:107A100000051E03046000C0240400048F86005487
65615+:107A20003C010800A0243FD83C0C8000AD800048F9
65616+:107A30003C048000348E010091CD000B31A5002064
65617+:107A400010A000073C078000349309809272000860
65618+:107A50000012860000107E0305E000C43C1F800871
65619+:107A600034EC0100918A000B34EB09809169000825
65620+:107A7000314400400004402B3123000800C8982303
65621+:107A80001460000224120003000090213C108000CA
65622+:107A900036180A8036040900970E002C90830011D6
65623+:107AA0009089001293050018307F00FF312800FFF5
65624+:107AB000024810210002C880930D0018033F78216E
65625+:107AC00001F1302130B100FF00D11821A78E0058FC
65626+:107AD0003C010800A4263FCE3C010800A4233FD06F
65627+:107AE00015A00002000000000000000D920B010B29
65628+:107AF0003065FFFF3C010800A4233FD2316A0040FB
65629+:107B00003C010800A4203FC83C010800A4203FC459
65630+:107B10001140000224A4000A24A4000B3091FFFFAE
65631+:107B20000E0001E7022020219206010B3C0C080008
65632+:107B3000958C3FD2004020210006698231A70001C8
65633+:107B40000E00060101872821004020210260282123
65634+:107B50000E00060C024030210E0007A1004020213B
65635+:107B600016C00069004020219212010B32560040DD
65636+:107B700012C000053C0500FF8C93000034AEFFFFEF
65637+:107B8000026E8024AC9000000E0001FB0220202138
65638+:107B90003C0F080091EF3FD831F10003122000168E
65639+:107BA0003C1380088F8200543C09800835280080EF
65640+:107BB000245F0001AD1F003C3C0580088CB9000427
65641+:107BC00003E02021033FC0231B000002AF9F0054AD
65642+:107BD0008CA400040E0006F8ACA400043C0780004E
65643+:107BE0008CEB00743C04800834830080004B5021EF
65644+:107BF000AC6A000C3C1380083670008002802021A3
65645+:107C000002A02821A200006B0E00075D3C1480003A
65646+:107C10008F920054368C0140AD92000C8F86004844
65647+:107C20003C151000344D000624D60001AF960048E4
65648+:107C30008FBF002CA18600128FB60028AD8D0014D6
65649+:107C40008FB3001CAE9501788FB200188FB5002459
65650+:107C50008FB400208FB100148FB0001003E0000833
65651+:107C600027BD003034640980908F0008000F760033
65652+:107C7000000E6E0305A00033347F090093F8001B4B
65653+:107C8000241900103C010800A0393FD8331300022A
65654+:107C90001260FF678F8600548F8200601446FF6574
65655+:107CA0003C0480000E00007C000000003C048008C2
65656+:107CB0003485008090A8000924060016310300FFD7
65657+:107CC0001066000D0000000090AB00093C070800A2
65658+:107CD00090E73FD824090008316400FF34EA00012E
65659+:107CE0003C010800A02A3FD81089002F240C000A6C
65660+:107CF000108C00282402000C0E00007E0000000002
65661+:107D00000A0008608F8600540E0007B9024028213F
65662+:107D10000A0008AE004020213C0B8008356A008034
65663+:107D20008D4600548CE9000C1120FF3DAF860054B5
65664+:107D3000240700143C010800A0273FD80A00085F70
65665+:107D40003C0C800090910008241200023C010800C5
65666+:107D5000A0323FD8323000201200000B2416000160
65667+:107D60008F8600540A0008602411000837F800804C
65668+:107D70008F020038AFE200048FF90004AF19003C15
65669+:107D80000A00086C3C0780008F8600540A000860D7
65670+:107D900024110004A0A200090E00007E00000000D3
65671+:107DA0000A0008608F860054240200140A00093A71
65672+:107DB000A0A2000927BDFFE8AFB000103C10800072
65673+:107DC000AFBF001436020100904400090E00075DA9
65674+:107DD000240500013C0480089099000E3483008043
65675+:107DE000909F000F906F00269089000A33F800FFE3
65676+:107DF00000196E000018740031EC00FF01AE502530
65677+:107E0000000C5A00014B3825312800FF3603014091
65678+:107E10003445600000E830252402FF813C04100056
65679+:107E2000AC66000C8FBF0014AC650014A062001299
65680+:107E3000AE0401788FB0001003E0000827BD0018E1
65681+:107E400027BDFFE8308400FFAFBF00100E00075DC4
65682+:107E500030A500FF3C05800034A4014034470040B9
65683+:107E60002406FF92AC870014A08600128F83005472
65684+:107E70008FBF00103C02100027BD0018AC83000C1F
65685+:107E800003E00008ACA2017827BDFFD8AFB0001016
65686+:107E9000308400FF30B000FF3C058000AFB100141B
65687+:107EA000AFBF0020AFB3001CAFB20018000410C277
65688+:107EB00034A60100320300023051000114600007B3
65689+:107EC00090D200093C098008353300809268000593
65690+:107ED0003107000810E0000C308A00100240202119
65691+:107EE0000E00078302202821240200018FBF0020FA
65692+:107EF0008FB3001C8FB200188FB100148FB0001028
65693+:107F000003E0000827BD00281540003434A50A000E
65694+:107F10008CB800248CAF0008130F004B00003821F0
65695+:107F20003C0D800835B30080926C00682406000286
65696+:107F3000318B00FF116600843C06800034C20100D2
65697+:107F40009263004C90590009307F00FF53F9000400
65698+:107F50003213007C10E00069000000003213007C46
65699+:107F60005660005C0240202116200009320D0001FD
65700+:107F70003C0C800035840100358B0A008D6500249F
65701+:107F80008C86000414A6FFD900001021320D0001D8
65702+:107F900011A0000E024020213C1880003710010083
65703+:107FA0008E0F000C8F8E005011EE000800000000B4
65704+:107FB0000E000843022028218E19000C3C1F800867
65705+:107FC00037F00080AE190050024020210E000771EA
65706+:107FD000022028210A00098F240200013C05080024
65707+:107FE0008CA5006424A400013C010800AC240064BA
65708+:107FF0001600000D00000000022028210E0007716D
65709+:1080000002402021926E0068240C000231CD00FF56
65710+:1080100011AC0022024020210E00094100000000A6
65711+:108020000A00098F240200010E00007024040001E0
65712+:10803000926B0025020B30250E00007EA266002503
65713+:108040000A0009D3022028218E6200188CDF000468
65714+:108050008CB9002400021E0217F9FFB13065007FC1
65715+:108060009268004C264400013093007F1265004066
65716+:10807000310300FF1464FFAB3C0D8008264700016C
65717+:1080800030F1007F30E200FF1225000B24070001D1
65718+:10809000004090210A00099C2411000124050004DD
65719+:1080A0000E000732240600010E0009410000000006
65720+:1080B0000A00098F240200012405FF8002452024C4
65721+:1080C00000859026324200FF004090210A00099C62
65722+:1080D000241100010E00084302202821320700303D
65723+:1080E00010E0FFA132100082024020210E00078321
65724+:1080F000022028210A00098F240200018E6900183D
65725+:108100000240202102202821012640250E0009647A
65726+:10811000AE6800189264004C240500032406000198
65727+:108120000E000732308400FF0E00007024040001AE
65728+:1081300092710025021150250E00007EA26A0025D2
65729+:108140000A00098F240200018E6F00183C1880007D
65730+:108150000240202101F87025022028210E0007711D
65731+:10816000AE6E00189264004C0A000A1B240500043D
65732+:10817000324A0080394900801469FF6A3C0D80084A
65733+:108180000A0009F42647000127BDFFC0AFB0001860
65734+:108190003C108000AFBF0038AFB70034AFB600303E
65735+:1081A000AFB5002CAFB40028AFB30024AFB20020AD
65736+:1081B0000E0005BEAFB1001C360201009045000B59
65737+:1081C0000E00097690440008144000E78FBF003885
65738+:1081D0003C08800835070080A0E0006B3606098067
65739+:1081E00090C50000240300503C17080026F73F907C
65740+:1081F00030A400FF3C13080026733FA01083000347
65741+:108200003C1080000000B82100009821241F0010BD
65742+:108210003611010036120A00361509808E580024E6
65743+:108220008E3400048EAF00208F8C00543C01080077
65744+:10823000A03F3FD836190A80972B002C8EF60000FD
65745+:10824000932A00180298702301EC68233C0108006F
65746+:10825000AC2E3FB43C010800AC2D3FB83C010800F7
65747+:10826000AC2C3FDCA78B005802C0F809315400FF4A
65748+:1082700030490002152000E930420001504000C49E
65749+:108280009227000992A90008312800081500000271
65750+:10829000241500030000A8213C0A80003543090092
65751+:1082A00035440A008C8D00249072001190700012E9
65752+:1082B000907F0011325900FF321100FF02B11021EE
65753+:1082C0000002C08033EF00FF0319B021028F70213C
65754+:1082D00002D4602125CB00103C010800A4363FCE1B
65755+:1082E0003C010800AC2D3FE03C010800A42C3FD02D
65756+:1082F0003C010800A42B3FCC3556010035540980C1
65757+:1083000035510E008F8700548F89005C8E850020C8
65758+:1083100024080006012730233C010800AC283FD484
65759+:1083200000A7282304C000B50000902104A000B3DA
65760+:1083300000C5502B114000B5000000003C010800B2
65761+:10834000AC263FB88E6200000040F8090000000033
65762+:108350003046000214C0007400408021304B000100
65763+:10836000556000118E6200043C0D08008DAD3FBCCD
65764+:108370003C0EC0003C04800001AE6025AE2C000025
65765+:108380008C980000330F000811E0FFFD0000000092
65766+:10839000963F000824120001A79F00408E39000478
65767+:1083A000AF9900388E6200040040F8090000000018
65768+:1083B0000202802532030002146000B300000000B6
65769+:1083C0003C09080095293FC43C06080094C63FD0EC
65770+:1083D0003C0A0800954A3FC63C0708008CE73FBCB2
65771+:1083E000012670213C0308008C633FE03C08080034
65772+:1083F00095083FDA01CA20218ED9000C00E9282116
65773+:10840000249F000200A878210067C02133E4FFFF09
65774+:10841000AF9900503C010800AC383FE03C01080037
65775+:10842000A42F3FC83C010800A42E3FD20E0001E754
65776+:10843000000000008F8D0048004020213C01080012
65777+:10844000A02D3FD98E62000825AC0001AF8C0048FA
65778+:108450000040F809000000008F85005402A0302180
65779+:108460000E00060C004020210E0007A10040202134
65780+:108470008E6B000C0160F809004020213C0A0800C6
65781+:10848000954A3FD23C06080094C63FC601464821A3
65782+:10849000252800020E0001FB3104FFFF3C05080007
65783+:1084A0008CA53FB43C0708008CE73FBC00A7202305
65784+:1084B0003C010800AC243FB414800006000000001A
65785+:1084C0003C0208008C423FD4344B00403C01080081
65786+:1084D000AC2B3FD4124000438F8E00448E2D0010F1
65787+:1084E0008F920044AE4D00208E2C0018AE4C00241C
65788+:1084F0003C04080094843FC80E0006FA0000000007
65789+:108500008F9F00548E6700103C010800AC3F3FDC99
65790+:1085100000E0F809000000003C1908008F393FB462
65791+:108520001720FF798F870054979300583C11800ED5
65792+:10853000321601000E000729A633002C16C0004594
65793+:10854000320300105460004C8EE5000432080040F5
65794+:108550005500001D8EF000088EE4000C0080F80924
65795+:10856000000000008FBF00388FB700348FB6003096
65796+:108570008FB5002C8FB400288FB300248FB2002059
65797+:108580008FB1001C8FB0001803E0000827BD004029
65798+:108590008F86003C36110E0000072E0000A6202515
65799+:1085A000AE0400808E4300208E500024AFA3001044
65800+:1085B000AE2300148FB20010AE320010AE30001C9B
65801+:1085C0000A000A75AE3000180200F8090000000029
65802+:1085D0008EE4000C0080F809000000000A000B2E59
65803+:1085E0008FBF003824180001240F0001A5C000200F
65804+:1085F000A5D800220A000B10ADCF00243C010800D2
65805+:10860000AC203FB80A000AA68E6200003C010800B8
65806+:10861000AC253FB80A000AA68E6200009224000929
65807+:108620000E000771000028218FBF00388FB700347B
65808+:108630008FB600308FB5002C8FB400288FB3002484
65809+:108640008FB200208FB1001C8FB0001803E000082B
65810+:1086500027BD00403C1480009295010900002821AC
65811+:108660000E00084332A400FF320300105060FFB830
65812+:10867000320800408EE5000400A0F8090000000068
65813+:108680000A000B28320800405240FFA89793005878
65814+:108690008E3400148F930044AE7400208E35001C7D
65815+:1086A000AE7500240A000B1F979300588F820014A8
65816+:1086B0000004218003E00008008210213C078008AC
65817+:1086C00034E200809043006900804021106000097E
65818+:1086D0003C0401003C0708008CE73FDC8F8300303E
65819+:1086E00000E32023048000089389001C14E30003A6
65820+:1086F0000100202103E00008008010213C0401005B
65821+:1087000003E00008008010211120000B00673823CF
65822+:108710003C0D800035AC0980918B007C316A0002F1
65823+:10872000114000202409003400E9702B15C0FFF12E
65824+:108730000100202100E938232403FFFC00A3C82402
65825+:1087400000E3C02400F9782B15E0FFEA030820219C
65826+:1087500030C400030004102314C000143049000387
65827+:108760000000302100A9782101E6702100EE682B7D
65828+:1087700011A0FFE03C0401002D3800010006C82BC9
65829+:10878000010548210319382414E0FFDA2524FFFCF1
65830+:108790002402FFFC00A218240068202103E0000846
65831+:1087A000008010210A000B9E240900303C0C800040
65832+:1087B0003586098090CB007C316A00041540FFE9C2
65833+:1087C000240600040A000BAD000030213C03080021
65834+:1087D0008C63005C8F82001827BDFFE0AFBF0018DC
65835+:1087E000AFB1001410620005AFB00010000329C043
65836+:1087F00024A40280AF840014AF8300183C108000D2
65837+:1088000036020A0094450032361101000E000B7F3B
65838+:1088100030A43FFF8E240000241FFF803C11008005
65839+:108820000082C021031F60243309007F000CC9406F
65840+:1088300003294025330E0078362F00033C0D10002D
65841+:10884000010D502501CF5825AE0C002836080980AF
65842+:10885000AE0C080CAE0B082CAE0A08309103006970
65843+:108860003C06800C0126382110600006AF870034DA
65844+:108870008D09003C8D03006C0123382318E0008231
65845+:10888000000000003C0B8008356A00803C1080002E
65846+:10889000A1400069360609808CC200383C06800081
65847+:1088A00034C50A0090A8003C310C00201180001A49
65848+:1088B000AF820030240D00013C0E800035D10A004B
65849+:1088C000A38D001CAF8000248E2400248F850024FB
65850+:1088D000240D0008AF800020AF8000283C01080074
65851+:1088E000A42D3FC63C010800A4203FDA0E000B83F4
65852+:1088F000000030219228003C8FBF00188FB1001477
65853+:108900008FB0001000086142AF82002C27BD00200C
65854+:1089100003E000083182000190B80032240E00010B
65855+:10892000330F00FF000F2182108E00412419000236
65856+:108930001099006434C40AC03C03800034640A0007
65857+:108940008C8F002415E0001E34660900909F0030D3
65858+:108950002418000533F9003F1338004E24030001AA
65859+:108960008F860020A383001CAF860028AF860024DA
65860+:108970003C0E800035D10A008E2400248F8500240F
65861+:10898000240D00083C010800A42D3FC63C0108004E
65862+:10899000A4203FDA0E000B83000000009228003C68
65863+:1089A0008FBF00188FB100148FB000100008614213
65864+:1089B000AF82002C27BD002003E0000831820001B7
65865+:1089C0008C8A00088C8B00248CD000643C0E8000C4
65866+:1089D00035D10A00014B2823AF900024A380001C4E
65867+:1089E000AF8500288E2400248F8600208F850024E8
65868+:1089F000240D00083C010800A42D3FC63C010800DE
65869+:108A0000A4203FDA0E000B83000000009228003CF7
65870+:108A10008FBF00188FB100148FB0001000086142A2
65871+:108A2000AF82002C27BD002003E000083182000146
65872+:108A300090A200303051003F5224002834C50AC0B3
65873+:108A40008CB000241600002234CB09008CA600480C
65874+:108A50003C0A7FFF3545FFFF00C510243C0E800017
65875+:108A6000AF82002035C509008F8800208CAD0060E2
65876+:108A7000010D602B15800002010020218CA40060F4
65877+:108A80000A000C22AF8400208D02006C0A000BFC4F
65878+:108A90003C0680008C8200488F8600203C097FFFC6
65879+:108AA0003527FFFF004788243C0480082403000189
65880+:108AB000AF910028AC80006CA383001C0A000C302E
65881+:108AC000AF8600248C9F00140A000C22AF9F002068
65882+:108AD0008D6200680A000C6C3C0E800034C4098072
65883+:108AE0008C8900708CA300140123382B10E0000443
65884+:108AF000000000008C8200700A000C6C3C0E8000AC
65885+:108B00008CA200140A000C6C3C0E80008F8500249F
65886+:108B100027BDFFE0AFBF0018AFB1001414A00008DC
65887+:108B2000AFB000103C04800034870A0090E60030AB
65888+:108B30002402000530C3003F106200B934840900EC
65889+:108B40008F91002000A080213C048000348E0A0018
65890+:108B50008DCD00043C0608008CC63FB831A73FFF0E
65891+:108B600000E6602B5580000100E03021938F001C4F
65892+:108B700011E0007800D0282B349F098093F9007C05
65893+:108B800033380002130000792403003400C3102B93
65894+:108B9000144000D90000000000C3302300D0282B6F
65895+:108BA0003C010800A4233FC414A0006E0200182159
65896+:108BB0003C0408008C843FB40064402B5500000145
65897+:108BC000006020213C05800034A90A00912A003C65
65898+:108BD0003C010800AC243FBC31430020146000037A
65899+:108BE0000000482134AB0E008D6900188F88002CDE
65900+:108BF0000128202B1080005F000000003C050800C9
65901+:108C00008CA53FBC00A96821010D602B1180005C80
65902+:108C100000B0702B0109382300E028213C01080036
65903+:108C2000AC273FBC12000003240AFFFC10B0008DEB
65904+:108C30003224000300AA18243C010800A4203FDAD3
65905+:108C40003C010800AC233FBC006028218F84002435
65906+:108C5000120400063C0B80088D6C006C0200202181
65907+:108C6000AF91002025900001AD70006C8F8D002821
65908+:108C700000858823AF91002401A52023AF8400281C
65909+:108C80001220000224070018240700103C18800856
65910+:108C90003706008090CF00683C010800A0273FD82D
65911+:108CA0002407000131EE00FF11C70047000000005B
65912+:108CB00014800018000028213C06800034D109806F
65913+:108CC00034CD010091A600098E2C001824C40001A7
65914+:108CD000000C86023205007F308B007F1165007F1B
65915+:108CE0002407FF803C19800837290080A124004C0C
65916+:108CF0003C0808008D083FD4241800023C010800FD
65917+:108D0000A0384019350F00083C010800AC2F3FD4B3
65918+:108D1000240500103C02800034440A009083003C8B
65919+:108D2000307F002013E0000500A02021240A00016C
65920+:108D30003C010800AC2A3FBC34A400018FBF0018DE
65921+:108D40008FB100148FB000100080102103E00008E4
65922+:108D500027BD00203C010800A4203FC410A0FF94C0
65923+:108D6000020018210A000CC000C018210A000CB72C
65924+:108D7000240300303C0508008CA53FBC00B0702BDC
65925+:108D800011C0FFA8000000003C19080097393FC43B
65926+:108D90000325C0210307782B11E000072CAA00044B
65927+:108DA0003C0360008C625404305F003F17E0FFE337
65928+:108DB000240400422CAA00041140FF9A240400421B
65929+:108DC0000A000D248FBF00181528FFB9000000000D
65930+:108DD0008CCA00183C1F800024020002015F182585
65931+:108DE000ACC3001837F90A00A0C200689329003C00
65932+:108DF0002404000400A01021312800203C010800B8
65933+:108E0000A0244019110000022405001024020001D2
65934+:108E10003C010800AC223FB40A000D1A3C0280005D
65935+:108E20008F8800288C8900600109282B14A000027B
65936+:108E3000010088218C9100603C048000348B0E007E
65937+:108E40008D640018240A000102202821022030210C
65938+:108E5000A38A001C0E000B83022080210A000CA6AE
65939+:108E6000AF82002C00045823122000073164000355
65940+:108E70003C0E800035C7098090ED007C31AC0004C9
65941+:108E800015800019248F00043C010800A4243FDA57
65942+:108E90003C1F080097FF3FDA03E5C82100D9C02B2B
65943+:108EA0001300FF6B8F8400242CA6000514C0FFA3C1
65944+:108EB0002404004230A200031440000200A2182340
65945+:108EC00024A3FFFC3C010800AC233FBC3C0108008C
65946+:108ED000A4203FDA0A000CE70060282100C77024B4
65947+:108EE0000A000D0D01C720263C010800A42F3FDA1F
65948+:108EF0000A000D78000000003C010800AC203FBCD7
65949+:108F00000A000D23240400428F8300283C058000C2
65950+:108F100034AA0A00146000060000102191470030B6
65951+:108F20002406000530E400FF108600030000000066
65952+:108F300003E0000800000000914B0048316900FF89
65953+:108F4000000941C21500FFFA3C0680083C040800F5
65954+:108F500094843FC43C0308008C633FDC3C19080048
65955+:108F60008F393FBC3C0F080095EF3FDA0064C02109
65956+:108F70008CCD00040319702101CF602134AB0E00A9
65957+:108F8000018D282318A0001D00000000914F004C07
65958+:108F90008F8C0034956D001031EE00FF8D89000438
65959+:108FA00001AE30238D8A000030CEFFFF000E290075
65960+:108FB0000125C82100003821014720210325182B55
65961+:108FC0000083C021AD990004AD980000918F000A84
65962+:108FD00001CF6821A18D000A956500128F8A0034A7
65963+:108FE000A5450008954B003825690001A5490038C2
65964+:108FF0009148000D35070008A147000D03E0000867
65965+:109000000000000027BDFFD8AFB000189388001CF7
65966+:109010008FB000143C0A80003C197FFF8F8700242A
65967+:109020003738FFFFAFBF0020AFB1001C355F0A002B
65968+:109030000218182493EB003C00087FC03C02BFFFDD
65969+:10904000006F60252CF000013449FFFF3C1F080031
65970+:109050008FFF3FDC8F9900303C18080097183FD2F3
65971+:1090600001897824001047803C07EFFF3C05F0FFA2
65972+:1090700001E818253C1180003169002034E2FFFF2F
65973+:1090800034ADFFFF362E098027A50010240600020C
65974+:1090900003F96023270B0002354A0E0000621824F2
65975+:1090A0000080802115200002000040218D48001C16
65976+:1090B000A7AB0012058000392407000030E800FF4C
65977+:1090C00000083F00006758253C028008AFAB001441
65978+:1090D000344F008091EA00683C08080091083FD9AD
65979+:1090E0003C09DFFF352CFFFF000AF82B3C0208008B
65980+:1090F00094423FCCA3A80011016CC024001FCF40B4
65981+:10910000031918258FA70010AFA300143C0C08000A
65982+:10911000918C3FDBA7A200168FAB001400ED482412
65983+:109120003C0F01003C0A0FFF012FC82531980003B6
65984+:10913000355FFFFF016D40243C027000033F38247F
65985+:1091400000181E0000E2482501037825AFAF001487
65986+:10915000AFA9001091CC007C0E000092A3AC0015CA
65987+:10916000362D0A0091A6003C30C400201080000675
65988+:10917000260200083C11080096313FC8262EFFFF4A
65989+:109180003C010800A42E3FC88FBF00208FB1001CF7
65990+:109190008FB0001803E0000827BD00288F8B002C3B
65991+:1091A000010B502B5540FFC5240700010A000E0497
65992+:1091B00030E800FF9383001C3C02800027BDFFD8ED
65993+:1091C00034480A0000805021AFBF002034460AC056
65994+:1091D000010028211060000E3444098091070030FE
65995+:1091E000240B00058F89002030EC003F118B000B11
65996+:1091F00000003821AFA900103C0B80088D69006C7D
65997+:10920000AFAA00180E00015AAFA90014A380001CD9
65998+:109210008FBF002003E0000827BD00288D1F0048F5
65999+:109220003C1808008F183FBC8F9900283C027FFF34
66000+:109230008D0800443443FFFFAFA900103C0B8008A9
66001+:109240008D69006C03E370240319782101CF682332
66002+:1092500001A83821AFAA00180E00015AAFA90014C6
66003+:109260000A000E58A380001C3C05800034A60A00AA
66004+:1092700090C7003C3C06080094C63FDA3C02080058
66005+:109280008C423FD430E30020000624001060001E12
66006+:10929000004438253C0880083505008090A300680C
66007+:1092A00000004821240800010000282124040001B6
66008+:1092B0003C0680008CCD017805A0FFFE34CF014034
66009+:1092C000ADE800083C0208008C423FDCA5E5000444
66010+:1092D000A5E40006ADE2000C3C04080090843FD9F0
66011+:1092E0003C03800834790080A1E40012ADE700144B
66012+:1092F000A5E900189338004C3C0E1000A1F8002D91
66013+:1093000003E00008ACCE017834A90E008D28001CC3
66014+:109310003C0C08008D8C3FBC952B0016952A001440
66015+:10932000018648213164FFFF0A000E803145FFFFAE
66016+:109330003C04800034830A009065003C30A2002089
66017+:109340001040001934870E00000040210000382131
66018+:10935000000020213C0680008CC901780520FFFE1A
66019+:1093600034CA014034CF010091EB0009AD48000838
66020+:109370003C0E08008DCE3FDC240DFF91240C0040F4
66021+:109380003C081000A5440004A5470006AD4E000CA3
66022+:10939000A14D0012AD4C0014A5400018A14B002DAA
66023+:1093A00003E00008ACC801788CE8001894E60012CD
66024+:1093B00094E4001030C7FFFF0A000EA93084FFFFBD
66025+:1093C0003C04800034830A009065003C30A20020F9
66026+:1093D0001040002727BDFFF82409000100003821B4
66027+:1093E000240800013C0680008CCA01780540FFFE7D
66028+:1093F0003C0280FF34C40100908D00093C0C080041
66029+:10940000918C4019A3AD00038FAB00003185007F24
66030+:109410003459FFFF01665025AFAA00009083000A6F
66031+:10942000A3A0000200057E00A3A300018FB80000E6
66032+:1094300034CB0140240C30000319702401CF68257F
66033+:10944000AD6D000C27BD0008AD6C0014A5600018C0
66034+:10945000AD690008A56700042409FF80A56800061F
66035+:109460003C081000A169001203E00008ACC80178B4
66036+:1094700034870E008CE9001894E6001294E4001082
66037+:1094800030C8FFFF0A000ECD3087FFFF27BDFFE089
66038+:10949000AFB100143C118000AFB00010AFBF001896
66039+:1094A00036380A00970F0032363001000E000B7F6D
66040+:1094B00031E43FFF8E0E0000240DFF803C042000AD
66041+:1094C00001C25821016D6024000C4940316A007FBF
66042+:1094D000012A4025010438253C048008AE270830C5
66043+:1094E0003486008090C500682403000230A200FF8B
66044+:1094F000104300048F9F00208F990024AC9F0068C8
66045+:10950000AC9900648FBF00188FB100148FB00010A9
66046+:1095100003E0000827BD00203C0A0800254A3A80E5
66047+:109520003C09080025293B103C08080025082F1C91
66048+:109530003C07080024E73BDC3C06080024C639044D
66049+:109540003C05080024A536583C0408002484325CFD
66050+:109550003C030800246339B83C0208002442375415
66051+:109560003C010800AC2A3F983C010800AC293F941C
66052+:109570003C010800AC283F903C010800AC273F9C10
66053+:109580003C010800AC263FAC3C010800AC253FA4E0
66054+:109590003C010800AC243FA03C010800AC233FB0D4
66055+:1095A0003C010800AC223FA803E0000800000000D6
66056+:1095B00080000940800009008008010080080080C8
66057+:1095C00080080000800E00008008008080080000F5
66058+:1095D00080000A8080000A00800009808000090065
66059+:00000001FF
66060diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
66061index eb14e05..5156de7 100644
66062--- a/fs/9p/vfs_addr.c
66063+++ b/fs/9p/vfs_addr.c
66064@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
66065
66066 retval = v9fs_file_write_internal(inode,
66067 v9inode->writeback_fid,
66068- (__force const char __user *)buffer,
66069+ (const char __force_user *)buffer,
66070 len, &offset, 0);
66071 if (retval > 0)
66072 retval = 0;
66073diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
66074index 3662f1d..90558b5 100644
66075--- a/fs/9p/vfs_inode.c
66076+++ b/fs/9p/vfs_inode.c
66077@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
66078 void
66079 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
66080 {
66081- char *s = nd_get_link(nd);
66082+ const char *s = nd_get_link(nd);
66083
66084 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
66085 dentry, IS_ERR(s) ? "<error>" : s);
66086diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
66087index 270c481..0d8a962 100644
66088--- a/fs/Kconfig.binfmt
66089+++ b/fs/Kconfig.binfmt
66090@@ -106,7 +106,7 @@ config HAVE_AOUT
66091
66092 config BINFMT_AOUT
66093 tristate "Kernel support for a.out and ECOFF binaries"
66094- depends on HAVE_AOUT
66095+ depends on HAVE_AOUT && BROKEN
66096 ---help---
66097 A.out (Assembler.OUTput) is a set of formats for libraries and
66098 executables used in the earliest versions of UNIX. Linux used
66099diff --git a/fs/afs/inode.c b/fs/afs/inode.c
66100index 8a1d38e..300a14e 100644
66101--- a/fs/afs/inode.c
66102+++ b/fs/afs/inode.c
66103@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
66104 struct afs_vnode *vnode;
66105 struct super_block *sb;
66106 struct inode *inode;
66107- static atomic_t afs_autocell_ino;
66108+ static atomic_unchecked_t afs_autocell_ino;
66109
66110 _enter("{%x:%u},%*.*s,",
66111 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
66112@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
66113 data.fid.unique = 0;
66114 data.fid.vnode = 0;
66115
66116- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
66117+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
66118 afs_iget5_autocell_test, afs_iget5_set,
66119 &data);
66120 if (!inode) {
66121diff --git a/fs/aio.c b/fs/aio.c
66122index a793f70..46f45af 100644
66123--- a/fs/aio.c
66124+++ b/fs/aio.c
66125@@ -404,7 +404,7 @@ static int aio_setup_ring(struct kioctx *ctx)
66126 size += sizeof(struct io_event) * nr_events;
66127
66128 nr_pages = PFN_UP(size);
66129- if (nr_pages < 0)
66130+ if (nr_pages <= 0)
66131 return -EINVAL;
66132
66133 file = aio_private_file(ctx, nr_pages);
66134diff --git a/fs/attr.c b/fs/attr.c
66135index 6530ced..4a827e2 100644
66136--- a/fs/attr.c
66137+++ b/fs/attr.c
66138@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
66139 unsigned long limit;
66140
66141 limit = rlimit(RLIMIT_FSIZE);
66142+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
66143 if (limit != RLIM_INFINITY && offset > limit)
66144 goto out_sig;
66145 if (offset > inode->i_sb->s_maxbytes)
66146diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
66147index 116fd38..c04182da 100644
66148--- a/fs/autofs4/waitq.c
66149+++ b/fs/autofs4/waitq.c
66150@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
66151 {
66152 unsigned long sigpipe, flags;
66153 mm_segment_t fs;
66154- const char *data = (const char *)addr;
66155+ const char __user *data = (const char __force_user *)addr;
66156 ssize_t wr = 0;
66157
66158 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
66159@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
66160 return 1;
66161 }
66162
66163+#ifdef CONFIG_GRKERNSEC_HIDESYM
66164+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
66165+#endif
66166+
66167 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
66168 enum autofs_notify notify)
66169 {
66170@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
66171
66172 /* If this is a direct mount request create a dummy name */
66173 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
66174+#ifdef CONFIG_GRKERNSEC_HIDESYM
66175+ /* this name does get written to userland via autofs4_write() */
66176+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
66177+#else
66178 qstr.len = sprintf(name, "%p", dentry);
66179+#endif
66180 else {
66181 qstr.len = autofs4_getpath(sbi, dentry, &name);
66182 if (!qstr.len) {
66183diff --git a/fs/befs/endian.h b/fs/befs/endian.h
66184index 2722387..56059b5 100644
66185--- a/fs/befs/endian.h
66186+++ b/fs/befs/endian.h
66187@@ -11,7 +11,7 @@
66188
66189 #include <asm/byteorder.h>
66190
66191-static inline u64
66192+static inline u64 __intentional_overflow(-1)
66193 fs64_to_cpu(const struct super_block *sb, fs64 n)
66194 {
66195 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
66196@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
66197 return (__force fs64)cpu_to_be64(n);
66198 }
66199
66200-static inline u32
66201+static inline u32 __intentional_overflow(-1)
66202 fs32_to_cpu(const struct super_block *sb, fs32 n)
66203 {
66204 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
66205@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
66206 return (__force fs32)cpu_to_be32(n);
66207 }
66208
66209-static inline u16
66210+static inline u16 __intentional_overflow(-1)
66211 fs16_to_cpu(const struct super_block *sb, fs16 n)
66212 {
66213 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
66214diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
66215index 4c55668..eeae150 100644
66216--- a/fs/binfmt_aout.c
66217+++ b/fs/binfmt_aout.c
66218@@ -16,6 +16,7 @@
66219 #include <linux/string.h>
66220 #include <linux/fs.h>
66221 #include <linux/file.h>
66222+#include <linux/security.h>
66223 #include <linux/stat.h>
66224 #include <linux/fcntl.h>
66225 #include <linux/ptrace.h>
66226@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
66227 #endif
66228 # define START_STACK(u) ((void __user *)u.start_stack)
66229
66230+ memset(&dump, 0, sizeof(dump));
66231+
66232 fs = get_fs();
66233 set_fs(KERNEL_DS);
66234 has_dumped = 1;
66235@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
66236
66237 /* If the size of the dump file exceeds the rlimit, then see what would happen
66238 if we wrote the stack, but not the data area. */
66239+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
66240 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
66241 dump.u_dsize = 0;
66242
66243 /* Make sure we have enough room to write the stack and data areas. */
66244+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
66245 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
66246 dump.u_ssize = 0;
66247
66248@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
66249 rlim = rlimit(RLIMIT_DATA);
66250 if (rlim >= RLIM_INFINITY)
66251 rlim = ~0;
66252+
66253+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
66254 if (ex.a_data + ex.a_bss > rlim)
66255 return -ENOMEM;
66256
66257@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
66258
66259 install_exec_creds(bprm);
66260
66261+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66262+ current->mm->pax_flags = 0UL;
66263+#endif
66264+
66265+#ifdef CONFIG_PAX_PAGEEXEC
66266+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
66267+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
66268+
66269+#ifdef CONFIG_PAX_EMUTRAMP
66270+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
66271+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
66272+#endif
66273+
66274+#ifdef CONFIG_PAX_MPROTECT
66275+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
66276+ current->mm->pax_flags |= MF_PAX_MPROTECT;
66277+#endif
66278+
66279+ }
66280+#endif
66281+
66282 if (N_MAGIC(ex) == OMAGIC) {
66283 unsigned long text_addr, map_size;
66284 loff_t pos;
66285@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
66286 return error;
66287
66288 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
66289- PROT_READ | PROT_WRITE | PROT_EXEC,
66290+ PROT_READ | PROT_WRITE,
66291 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
66292 fd_offset + ex.a_text);
66293 if (error != N_DATADDR(ex))
66294diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
66295index d925f55..d31f527 100644
66296--- a/fs/binfmt_elf.c
66297+++ b/fs/binfmt_elf.c
66298@@ -34,6 +34,7 @@
66299 #include <linux/utsname.h>
66300 #include <linux/coredump.h>
66301 #include <linux/sched.h>
66302+#include <linux/xattr.h>
66303 #include <asm/uaccess.h>
66304 #include <asm/param.h>
66305 #include <asm/page.h>
66306@@ -47,7 +48,7 @@
66307
66308 static int load_elf_binary(struct linux_binprm *bprm);
66309 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
66310- int, int, unsigned long);
66311+ int, int, unsigned long) __intentional_overflow(-1);
66312
66313 #ifdef CONFIG_USELIB
66314 static int load_elf_library(struct file *);
66315@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
66316 #define elf_core_dump NULL
66317 #endif
66318
66319+#ifdef CONFIG_PAX_MPROTECT
66320+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
66321+#endif
66322+
66323+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66324+static void elf_handle_mmap(struct file *file);
66325+#endif
66326+
66327 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
66328 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
66329 #else
66330@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
66331 .load_binary = load_elf_binary,
66332 .load_shlib = load_elf_library,
66333 .core_dump = elf_core_dump,
66334+
66335+#ifdef CONFIG_PAX_MPROTECT
66336+ .handle_mprotect= elf_handle_mprotect,
66337+#endif
66338+
66339+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66340+ .handle_mmap = elf_handle_mmap,
66341+#endif
66342+
66343 .min_coredump = ELF_EXEC_PAGESIZE,
66344 };
66345
66346@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
66347
66348 static int set_brk(unsigned long start, unsigned long end)
66349 {
66350+ unsigned long e = end;
66351+
66352 start = ELF_PAGEALIGN(start);
66353 end = ELF_PAGEALIGN(end);
66354 if (end > start) {
66355@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
66356 if (BAD_ADDR(addr))
66357 return addr;
66358 }
66359- current->mm->start_brk = current->mm->brk = end;
66360+ current->mm->start_brk = current->mm->brk = e;
66361 return 0;
66362 }
66363
66364@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
66365 elf_addr_t __user *u_rand_bytes;
66366 const char *k_platform = ELF_PLATFORM;
66367 const char *k_base_platform = ELF_BASE_PLATFORM;
66368- unsigned char k_rand_bytes[16];
66369+ u32 k_rand_bytes[4];
66370 int items;
66371 elf_addr_t *elf_info;
66372 int ei_index = 0;
66373 const struct cred *cred = current_cred();
66374 struct vm_area_struct *vma;
66375+ unsigned long saved_auxv[AT_VECTOR_SIZE];
66376
66377 /*
66378 * In some cases (e.g. Hyper-Threading), we want to avoid L1
66379@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
66380 * Generate 16 random bytes for userspace PRNG seeding.
66381 */
66382 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
66383- u_rand_bytes = (elf_addr_t __user *)
66384- STACK_ALLOC(p, sizeof(k_rand_bytes));
66385+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
66386+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
66387+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
66388+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
66389+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
66390+ u_rand_bytes = (elf_addr_t __user *) p;
66391 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
66392 return -EFAULT;
66393
66394@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
66395 return -EFAULT;
66396 current->mm->env_end = p;
66397
66398+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
66399+
66400 /* Put the elf_info on the stack in the right place. */
66401 sp = (elf_addr_t __user *)envp + 1;
66402- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
66403+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
66404 return -EFAULT;
66405 return 0;
66406 }
66407@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
66408 an ELF header */
66409
66410 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66411- struct file *interpreter, unsigned long *interp_map_addr,
66412+ struct file *interpreter,
66413 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
66414 {
66415 struct elf_phdr *eppnt;
66416- unsigned long load_addr = 0;
66417+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
66418 int load_addr_set = 0;
66419 unsigned long last_bss = 0, elf_bss = 0;
66420- unsigned long error = ~0UL;
66421+ unsigned long error = -EINVAL;
66422 unsigned long total_size;
66423 int i;
66424
66425@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66426 goto out;
66427 }
66428
66429+#ifdef CONFIG_PAX_SEGMEXEC
66430+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
66431+ pax_task_size = SEGMEXEC_TASK_SIZE;
66432+#endif
66433+
66434 eppnt = interp_elf_phdata;
66435 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
66436 if (eppnt->p_type == PT_LOAD) {
66437@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66438 map_addr = elf_map(interpreter, load_addr + vaddr,
66439 eppnt, elf_prot, elf_type, total_size);
66440 total_size = 0;
66441- if (!*interp_map_addr)
66442- *interp_map_addr = map_addr;
66443 error = map_addr;
66444 if (BAD_ADDR(map_addr))
66445 goto out;
66446@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66447 k = load_addr + eppnt->p_vaddr;
66448 if (BAD_ADDR(k) ||
66449 eppnt->p_filesz > eppnt->p_memsz ||
66450- eppnt->p_memsz > TASK_SIZE ||
66451- TASK_SIZE - eppnt->p_memsz < k) {
66452+ eppnt->p_memsz > pax_task_size ||
66453+ pax_task_size - eppnt->p_memsz < k) {
66454 error = -ENOMEM;
66455 goto out;
66456 }
66457@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66458 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
66459
66460 /* Map the last of the bss segment */
66461- error = vm_brk(elf_bss, last_bss - elf_bss);
66462- if (BAD_ADDR(error))
66463- goto out;
66464+ if (last_bss > elf_bss) {
66465+ error = vm_brk(elf_bss, last_bss - elf_bss);
66466+ if (BAD_ADDR(error))
66467+ goto out;
66468+ }
66469 }
66470
66471 error = load_addr;
66472@@ -634,6 +666,336 @@ out:
66473 return error;
66474 }
66475
66476+#ifdef CONFIG_PAX_PT_PAX_FLAGS
66477+#ifdef CONFIG_PAX_SOFTMODE
66478+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
66479+{
66480+ unsigned long pax_flags = 0UL;
66481+
66482+#ifdef CONFIG_PAX_PAGEEXEC
66483+ if (elf_phdata->p_flags & PF_PAGEEXEC)
66484+ pax_flags |= MF_PAX_PAGEEXEC;
66485+#endif
66486+
66487+#ifdef CONFIG_PAX_SEGMEXEC
66488+ if (elf_phdata->p_flags & PF_SEGMEXEC)
66489+ pax_flags |= MF_PAX_SEGMEXEC;
66490+#endif
66491+
66492+#ifdef CONFIG_PAX_EMUTRAMP
66493+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
66494+ pax_flags |= MF_PAX_EMUTRAMP;
66495+#endif
66496+
66497+#ifdef CONFIG_PAX_MPROTECT
66498+ if (elf_phdata->p_flags & PF_MPROTECT)
66499+ pax_flags |= MF_PAX_MPROTECT;
66500+#endif
66501+
66502+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66503+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
66504+ pax_flags |= MF_PAX_RANDMMAP;
66505+#endif
66506+
66507+ return pax_flags;
66508+}
66509+#endif
66510+
66511+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
66512+{
66513+ unsigned long pax_flags = 0UL;
66514+
66515+#ifdef CONFIG_PAX_PAGEEXEC
66516+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
66517+ pax_flags |= MF_PAX_PAGEEXEC;
66518+#endif
66519+
66520+#ifdef CONFIG_PAX_SEGMEXEC
66521+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
66522+ pax_flags |= MF_PAX_SEGMEXEC;
66523+#endif
66524+
66525+#ifdef CONFIG_PAX_EMUTRAMP
66526+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
66527+ pax_flags |= MF_PAX_EMUTRAMP;
66528+#endif
66529+
66530+#ifdef CONFIG_PAX_MPROTECT
66531+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
66532+ pax_flags |= MF_PAX_MPROTECT;
66533+#endif
66534+
66535+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66536+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
66537+ pax_flags |= MF_PAX_RANDMMAP;
66538+#endif
66539+
66540+ return pax_flags;
66541+}
66542+#endif
66543+
66544+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
66545+#ifdef CONFIG_PAX_SOFTMODE
66546+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
66547+{
66548+ unsigned long pax_flags = 0UL;
66549+
66550+#ifdef CONFIG_PAX_PAGEEXEC
66551+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
66552+ pax_flags |= MF_PAX_PAGEEXEC;
66553+#endif
66554+
66555+#ifdef CONFIG_PAX_SEGMEXEC
66556+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
66557+ pax_flags |= MF_PAX_SEGMEXEC;
66558+#endif
66559+
66560+#ifdef CONFIG_PAX_EMUTRAMP
66561+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
66562+ pax_flags |= MF_PAX_EMUTRAMP;
66563+#endif
66564+
66565+#ifdef CONFIG_PAX_MPROTECT
66566+ if (pax_flags_softmode & MF_PAX_MPROTECT)
66567+ pax_flags |= MF_PAX_MPROTECT;
66568+#endif
66569+
66570+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66571+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
66572+ pax_flags |= MF_PAX_RANDMMAP;
66573+#endif
66574+
66575+ return pax_flags;
66576+}
66577+#endif
66578+
66579+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
66580+{
66581+ unsigned long pax_flags = 0UL;
66582+
66583+#ifdef CONFIG_PAX_PAGEEXEC
66584+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
66585+ pax_flags |= MF_PAX_PAGEEXEC;
66586+#endif
66587+
66588+#ifdef CONFIG_PAX_SEGMEXEC
66589+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
66590+ pax_flags |= MF_PAX_SEGMEXEC;
66591+#endif
66592+
66593+#ifdef CONFIG_PAX_EMUTRAMP
66594+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
66595+ pax_flags |= MF_PAX_EMUTRAMP;
66596+#endif
66597+
66598+#ifdef CONFIG_PAX_MPROTECT
66599+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
66600+ pax_flags |= MF_PAX_MPROTECT;
66601+#endif
66602+
66603+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66604+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
66605+ pax_flags |= MF_PAX_RANDMMAP;
66606+#endif
66607+
66608+ return pax_flags;
66609+}
66610+#endif
66611+
66612+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66613+static unsigned long pax_parse_defaults(void)
66614+{
66615+ unsigned long pax_flags = 0UL;
66616+
66617+#ifdef CONFIG_PAX_SOFTMODE
66618+ if (pax_softmode)
66619+ return pax_flags;
66620+#endif
66621+
66622+#ifdef CONFIG_PAX_PAGEEXEC
66623+ pax_flags |= MF_PAX_PAGEEXEC;
66624+#endif
66625+
66626+#ifdef CONFIG_PAX_SEGMEXEC
66627+ pax_flags |= MF_PAX_SEGMEXEC;
66628+#endif
66629+
66630+#ifdef CONFIG_PAX_MPROTECT
66631+ pax_flags |= MF_PAX_MPROTECT;
66632+#endif
66633+
66634+#ifdef CONFIG_PAX_RANDMMAP
66635+ if (randomize_va_space)
66636+ pax_flags |= MF_PAX_RANDMMAP;
66637+#endif
66638+
66639+ return pax_flags;
66640+}
66641+
66642+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
66643+{
66644+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
66645+
66646+#ifdef CONFIG_PAX_EI_PAX
66647+
66648+#ifdef CONFIG_PAX_SOFTMODE
66649+ if (pax_softmode)
66650+ return pax_flags;
66651+#endif
66652+
66653+ pax_flags = 0UL;
66654+
66655+#ifdef CONFIG_PAX_PAGEEXEC
66656+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
66657+ pax_flags |= MF_PAX_PAGEEXEC;
66658+#endif
66659+
66660+#ifdef CONFIG_PAX_SEGMEXEC
66661+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
66662+ pax_flags |= MF_PAX_SEGMEXEC;
66663+#endif
66664+
66665+#ifdef CONFIG_PAX_EMUTRAMP
66666+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
66667+ pax_flags |= MF_PAX_EMUTRAMP;
66668+#endif
66669+
66670+#ifdef CONFIG_PAX_MPROTECT
66671+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
66672+ pax_flags |= MF_PAX_MPROTECT;
66673+#endif
66674+
66675+#ifdef CONFIG_PAX_ASLR
66676+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
66677+ pax_flags |= MF_PAX_RANDMMAP;
66678+#endif
66679+
66680+#endif
66681+
66682+ return pax_flags;
66683+
66684+}
66685+
66686+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
66687+{
66688+
66689+#ifdef CONFIG_PAX_PT_PAX_FLAGS
66690+ unsigned long i;
66691+
66692+ for (i = 0UL; i < elf_ex->e_phnum; i++)
66693+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
66694+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
66695+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
66696+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
66697+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
66698+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
66699+ return PAX_PARSE_FLAGS_FALLBACK;
66700+
66701+#ifdef CONFIG_PAX_SOFTMODE
66702+ if (pax_softmode)
66703+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
66704+ else
66705+#endif
66706+
66707+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
66708+ break;
66709+ }
66710+#endif
66711+
66712+ return PAX_PARSE_FLAGS_FALLBACK;
66713+}
66714+
66715+static unsigned long pax_parse_xattr_pax(struct file * const file)
66716+{
66717+
66718+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
66719+ ssize_t xattr_size, i;
66720+ unsigned char xattr_value[sizeof("pemrs") - 1];
66721+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
66722+
66723+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
66724+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
66725+ return PAX_PARSE_FLAGS_FALLBACK;
66726+
66727+ for (i = 0; i < xattr_size; i++)
66728+ switch (xattr_value[i]) {
66729+ default:
66730+ return PAX_PARSE_FLAGS_FALLBACK;
66731+
66732+#define parse_flag(option1, option2, flag) \
66733+ case option1: \
66734+ if (pax_flags_hardmode & MF_PAX_##flag) \
66735+ return PAX_PARSE_FLAGS_FALLBACK;\
66736+ pax_flags_hardmode |= MF_PAX_##flag; \
66737+ break; \
66738+ case option2: \
66739+ if (pax_flags_softmode & MF_PAX_##flag) \
66740+ return PAX_PARSE_FLAGS_FALLBACK;\
66741+ pax_flags_softmode |= MF_PAX_##flag; \
66742+ break;
66743+
66744+ parse_flag('p', 'P', PAGEEXEC);
66745+ parse_flag('e', 'E', EMUTRAMP);
66746+ parse_flag('m', 'M', MPROTECT);
66747+ parse_flag('r', 'R', RANDMMAP);
66748+ parse_flag('s', 'S', SEGMEXEC);
66749+
66750+#undef parse_flag
66751+ }
66752+
66753+ if (pax_flags_hardmode & pax_flags_softmode)
66754+ return PAX_PARSE_FLAGS_FALLBACK;
66755+
66756+#ifdef CONFIG_PAX_SOFTMODE
66757+ if (pax_softmode)
66758+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
66759+ else
66760+#endif
66761+
66762+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
66763+#else
66764+ return PAX_PARSE_FLAGS_FALLBACK;
66765+#endif
66766+
66767+}
66768+
66769+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
66770+{
66771+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
66772+
66773+ pax_flags = pax_parse_defaults();
66774+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
66775+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
66776+ xattr_pax_flags = pax_parse_xattr_pax(file);
66777+
66778+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
66779+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
66780+ pt_pax_flags != xattr_pax_flags)
66781+ return -EINVAL;
66782+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
66783+ pax_flags = xattr_pax_flags;
66784+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
66785+ pax_flags = pt_pax_flags;
66786+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
66787+ pax_flags = ei_pax_flags;
66788+
66789+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
66790+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66791+ if ((__supported_pte_mask & _PAGE_NX))
66792+ pax_flags &= ~MF_PAX_SEGMEXEC;
66793+ else
66794+ pax_flags &= ~MF_PAX_PAGEEXEC;
66795+ }
66796+#endif
66797+
66798+ if (0 > pax_check_flags(&pax_flags))
66799+ return -EINVAL;
66800+
66801+ current->mm->pax_flags = pax_flags;
66802+ return 0;
66803+}
66804+#endif
66805+
66806 /*
66807 * These are the functions used to load ELF style executables and shared
66808 * libraries. There is no binary dependent code anywhere else.
66809@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
66810 {
66811 unsigned long random_variable = 0;
66812
66813+#ifdef CONFIG_PAX_RANDUSTACK
66814+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
66815+ return stack_top - current->mm->delta_stack;
66816+#endif
66817+
66818 if ((current->flags & PF_RANDOMIZE) &&
66819 !(current->personality & ADDR_NO_RANDOMIZE)) {
66820 random_variable = (unsigned long) get_random_int();
66821@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
66822 unsigned long load_addr = 0, load_bias = 0;
66823 int load_addr_set = 0;
66824 char * elf_interpreter = NULL;
66825- unsigned long error;
66826+ unsigned long error = 0;
66827 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
66828 unsigned long elf_bss, elf_brk;
66829 int retval, i;
66830@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
66831 struct elfhdr interp_elf_ex;
66832 } *loc;
66833 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
66834+ unsigned long pax_task_size;
66835
66836 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
66837 if (!loc) {
66838@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
66839 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
66840 may depend on the personality. */
66841 SET_PERSONALITY2(loc->elf_ex, &arch_state);
66842+
66843+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66844+ current->mm->pax_flags = 0UL;
66845+#endif
66846+
66847+#ifdef CONFIG_PAX_DLRESOLVE
66848+ current->mm->call_dl_resolve = 0UL;
66849+#endif
66850+
66851+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
66852+ current->mm->call_syscall = 0UL;
66853+#endif
66854+
66855+#ifdef CONFIG_PAX_ASLR
66856+ current->mm->delta_mmap = 0UL;
66857+ current->mm->delta_stack = 0UL;
66858+#endif
66859+
66860+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66861+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
66862+ send_sig(SIGKILL, current, 0);
66863+ goto out_free_dentry;
66864+ }
66865+#endif
66866+
66867+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
66868+ pax_set_initial_flags(bprm);
66869+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
66870+ if (pax_set_initial_flags_func)
66871+ (pax_set_initial_flags_func)(bprm);
66872+#endif
66873+
66874+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66875+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
66876+ current->mm->context.user_cs_limit = PAGE_SIZE;
66877+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
66878+ }
66879+#endif
66880+
66881+#ifdef CONFIG_PAX_SEGMEXEC
66882+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66883+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
66884+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
66885+ pax_task_size = SEGMEXEC_TASK_SIZE;
66886+ current->mm->def_flags |= VM_NOHUGEPAGE;
66887+ } else
66888+#endif
66889+
66890+ pax_task_size = TASK_SIZE;
66891+
66892+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
66893+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66894+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
66895+ put_cpu();
66896+ }
66897+#endif
66898+
66899+#ifdef CONFIG_PAX_ASLR
66900+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
66901+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
66902+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
66903+ }
66904+#endif
66905+
66906+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
66907+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66908+ executable_stack = EXSTACK_DISABLE_X;
66909+ current->personality &= ~READ_IMPLIES_EXEC;
66910+ } else
66911+#endif
66912+
66913 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
66914 current->personality |= READ_IMPLIES_EXEC;
66915
66916@@ -925,12 +1364,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
66917 #else
66918 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
66919 #endif
66920- total_size = total_mapping_size(elf_phdata,
66921- loc->elf_ex.e_phnum);
66922- if (!total_size) {
66923- error = -EINVAL;
66924- goto out_free_dentry;
66925+
66926+#ifdef CONFIG_PAX_RANDMMAP
66927+ /* PaX: randomize base address at the default exe base if requested */
66928+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
66929+#ifdef CONFIG_SPARC64
66930+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
66931+#else
66932+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
66933+#endif
66934+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
66935+ elf_flags |= MAP_FIXED;
66936 }
66937+#endif
66938+
66939+ total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum);
66940 }
66941
66942 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
66943@@ -962,9 +1410,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
66944 * allowed task size. Note that p_filesz must always be
66945 * <= p_memsz so it is only necessary to check p_memsz.
66946 */
66947- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
66948- elf_ppnt->p_memsz > TASK_SIZE ||
66949- TASK_SIZE - elf_ppnt->p_memsz < k) {
66950+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
66951+ elf_ppnt->p_memsz > pax_task_size ||
66952+ pax_task_size - elf_ppnt->p_memsz < k) {
66953 /* set_brk can never work. Avoid overflows. */
66954 retval = -EINVAL;
66955 goto out_free_dentry;
66956@@ -1000,16 +1448,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
66957 if (retval)
66958 goto out_free_dentry;
66959 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
66960- retval = -EFAULT; /* Nobody gets to see this, but.. */
66961- goto out_free_dentry;
66962+ /*
66963+ * This bss-zeroing can fail if the ELF
66964+ * file specifies odd protections. So
66965+ * we don't check the return value
66966+ */
66967 }
66968
66969+#ifdef CONFIG_PAX_RANDMMAP
66970+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
66971+ unsigned long start, size, flags;
66972+ vm_flags_t vm_flags;
66973+
66974+ start = ELF_PAGEALIGN(elf_brk);
66975+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
66976+ flags = MAP_FIXED | MAP_PRIVATE;
66977+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
66978+
66979+ down_write(&current->mm->mmap_sem);
66980+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
66981+ retval = -ENOMEM;
66982+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
66983+// if (current->personality & ADDR_NO_RANDOMIZE)
66984+// vm_flags |= VM_READ | VM_MAYREAD;
66985+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
66986+ retval = IS_ERR_VALUE(start) ? start : 0;
66987+ }
66988+ up_write(&current->mm->mmap_sem);
66989+ if (retval == 0)
66990+ retval = set_brk(start + size, start + size + PAGE_SIZE);
66991+ if (retval < 0)
66992+ goto out_free_dentry;
66993+ }
66994+#endif
66995+
66996 if (elf_interpreter) {
66997- unsigned long interp_map_addr = 0;
66998-
66999 elf_entry = load_elf_interp(&loc->interp_elf_ex,
67000 interpreter,
67001- &interp_map_addr,
67002 load_bias, interp_elf_phdata);
67003 if (!IS_ERR((void *)elf_entry)) {
67004 /*
67005@@ -1237,7 +1712,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
67006 * Decide what to dump of a segment, part, all or none.
67007 */
67008 static unsigned long vma_dump_size(struct vm_area_struct *vma,
67009- unsigned long mm_flags)
67010+ unsigned long mm_flags, long signr)
67011 {
67012 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
67013
67014@@ -1275,7 +1750,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
67015 if (vma->vm_file == NULL)
67016 return 0;
67017
67018- if (FILTER(MAPPED_PRIVATE))
67019+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
67020 goto whole;
67021
67022 /*
67023@@ -1482,9 +1957,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
67024 {
67025 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
67026 int i = 0;
67027- do
67028+ do {
67029 i += 2;
67030- while (auxv[i - 2] != AT_NULL);
67031+ } while (auxv[i - 2] != AT_NULL);
67032 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
67033 }
67034
67035@@ -1493,7 +1968,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
67036 {
67037 mm_segment_t old_fs = get_fs();
67038 set_fs(KERNEL_DS);
67039- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
67040+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
67041 set_fs(old_fs);
67042 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
67043 }
67044@@ -2213,7 +2688,7 @@ static int elf_core_dump(struct coredump_params *cprm)
67045 vma = next_vma(vma, gate_vma)) {
67046 unsigned long dump_size;
67047
67048- dump_size = vma_dump_size(vma, cprm->mm_flags);
67049+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
67050 vma_filesz[i++] = dump_size;
67051 vma_data_size += dump_size;
67052 }
67053@@ -2321,6 +2796,167 @@ out:
67054
67055 #endif /* CONFIG_ELF_CORE */
67056
67057+#ifdef CONFIG_PAX_MPROTECT
67058+/* PaX: non-PIC ELF libraries need relocations on their executable segments
67059+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
67060+ * we'll remove VM_MAYWRITE for good on RELRO segments.
67061+ *
67062+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
67063+ * basis because we want to allow the common case and not the special ones.
67064+ */
67065+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
67066+{
67067+ struct elfhdr elf_h;
67068+ struct elf_phdr elf_p;
67069+ unsigned long i;
67070+ unsigned long oldflags;
67071+ bool is_textrel_rw, is_textrel_rx, is_relro;
67072+
67073+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
67074+ return;
67075+
67076+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
67077+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
67078+
67079+#ifdef CONFIG_PAX_ELFRELOCS
67080+ /* possible TEXTREL */
67081+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
67082+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
67083+#else
67084+ is_textrel_rw = false;
67085+ is_textrel_rx = false;
67086+#endif
67087+
67088+ /* possible RELRO */
67089+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
67090+
67091+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
67092+ return;
67093+
67094+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
67095+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
67096+
67097+#ifdef CONFIG_PAX_ETEXECRELOCS
67098+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
67099+#else
67100+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
67101+#endif
67102+
67103+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
67104+ !elf_check_arch(&elf_h) ||
67105+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
67106+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
67107+ return;
67108+
67109+ for (i = 0UL; i < elf_h.e_phnum; i++) {
67110+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
67111+ return;
67112+ switch (elf_p.p_type) {
67113+ case PT_DYNAMIC:
67114+ if (!is_textrel_rw && !is_textrel_rx)
67115+ continue;
67116+ i = 0UL;
67117+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
67118+ elf_dyn dyn;
67119+
67120+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
67121+ break;
67122+ if (dyn.d_tag == DT_NULL)
67123+ break;
67124+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
67125+ gr_log_textrel(vma);
67126+ if (is_textrel_rw)
67127+ vma->vm_flags |= VM_MAYWRITE;
67128+ else
67129+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
67130+ vma->vm_flags &= ~VM_MAYWRITE;
67131+ break;
67132+ }
67133+ i++;
67134+ }
67135+ is_textrel_rw = false;
67136+ is_textrel_rx = false;
67137+ continue;
67138+
67139+ case PT_GNU_RELRO:
67140+ if (!is_relro)
67141+ continue;
67142+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
67143+ vma->vm_flags &= ~VM_MAYWRITE;
67144+ is_relro = false;
67145+ continue;
67146+
67147+#ifdef CONFIG_PAX_PT_PAX_FLAGS
67148+ case PT_PAX_FLAGS: {
67149+ const char *msg_mprotect = "", *msg_emutramp = "";
67150+ char *buffer_lib, *buffer_exe;
67151+
67152+ if (elf_p.p_flags & PF_NOMPROTECT)
67153+ msg_mprotect = "MPROTECT disabled";
67154+
67155+#ifdef CONFIG_PAX_EMUTRAMP
67156+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
67157+ msg_emutramp = "EMUTRAMP enabled";
67158+#endif
67159+
67160+ if (!msg_mprotect[0] && !msg_emutramp[0])
67161+ continue;
67162+
67163+ if (!printk_ratelimit())
67164+ continue;
67165+
67166+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
67167+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
67168+ if (buffer_lib && buffer_exe) {
67169+ char *path_lib, *path_exe;
67170+
67171+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
67172+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
67173+
67174+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
67175+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
67176+
67177+ }
67178+ free_page((unsigned long)buffer_exe);
67179+ free_page((unsigned long)buffer_lib);
67180+ continue;
67181+ }
67182+#endif
67183+
67184+ }
67185+ }
67186+}
67187+#endif
67188+
67189+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
67190+
67191+extern int grsec_enable_log_rwxmaps;
67192+
67193+static void elf_handle_mmap(struct file *file)
67194+{
67195+ struct elfhdr elf_h;
67196+ struct elf_phdr elf_p;
67197+ unsigned long i;
67198+
67199+ if (!grsec_enable_log_rwxmaps)
67200+ return;
67201+
67202+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
67203+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
67204+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
67205+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
67206+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
67207+ return;
67208+
67209+ for (i = 0UL; i < elf_h.e_phnum; i++) {
67210+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
67211+ return;
67212+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
67213+ gr_log_ptgnustack(file);
67214+ }
67215+}
67216+#endif
67217+
67218 static int __init init_elf_binfmt(void)
67219 {
67220 register_binfmt(&elf_format);
67221diff --git a/fs/block_dev.c b/fs/block_dev.c
67222index 975266b..c3d1856 100644
67223--- a/fs/block_dev.c
67224+++ b/fs/block_dev.c
67225@@ -734,7 +734,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
67226 else if (bdev->bd_contains == bdev)
67227 return true; /* is a whole device which isn't held */
67228
67229- else if (whole->bd_holder == bd_may_claim)
67230+ else if (whole->bd_holder == (void *)bd_may_claim)
67231 return true; /* is a partition of a device that is being partitioned */
67232 else if (whole->bd_holder != NULL)
67233 return false; /* is a partition of a held device */
67234diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
67235index 6d67f32..8f33187 100644
67236--- a/fs/btrfs/ctree.c
67237+++ b/fs/btrfs/ctree.c
67238@@ -1181,9 +1181,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
67239 free_extent_buffer(buf);
67240 add_root_to_dirty_list(root);
67241 } else {
67242- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
67243- parent_start = parent->start;
67244- else
67245+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
67246+ if (parent)
67247+ parent_start = parent->start;
67248+ else
67249+ parent_start = 0;
67250+ } else
67251 parent_start = 0;
67252
67253 WARN_ON(trans->transid != btrfs_header_generation(parent));
67254diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
67255index 82f0c7c..dff78a8 100644
67256--- a/fs/btrfs/delayed-inode.c
67257+++ b/fs/btrfs/delayed-inode.c
67258@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
67259
67260 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
67261 {
67262- int seq = atomic_inc_return(&delayed_root->items_seq);
67263+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
67264 if ((atomic_dec_return(&delayed_root->items) <
67265 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
67266 waitqueue_active(&delayed_root->wait))
67267@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
67268
67269 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
67270 {
67271- int val = atomic_read(&delayed_root->items_seq);
67272+ int val = atomic_read_unchecked(&delayed_root->items_seq);
67273
67274 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
67275 return 1;
67276@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
67277 int seq;
67278 int ret;
67279
67280- seq = atomic_read(&delayed_root->items_seq);
67281+ seq = atomic_read_unchecked(&delayed_root->items_seq);
67282
67283 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
67284 if (ret)
67285diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
67286index f70119f..ab5894d 100644
67287--- a/fs/btrfs/delayed-inode.h
67288+++ b/fs/btrfs/delayed-inode.h
67289@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
67290 */
67291 struct list_head prepare_list;
67292 atomic_t items; /* for delayed items */
67293- atomic_t items_seq; /* for delayed items */
67294+ atomic_unchecked_t items_seq; /* for delayed items */
67295 int nodes; /* for delayed nodes */
67296 wait_queue_head_t wait;
67297 };
67298@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
67299 struct btrfs_delayed_root *delayed_root)
67300 {
67301 atomic_set(&delayed_root->items, 0);
67302- atomic_set(&delayed_root->items_seq, 0);
67303+ atomic_set_unchecked(&delayed_root->items_seq, 0);
67304 delayed_root->nodes = 0;
67305 spin_lock_init(&delayed_root->lock);
67306 init_waitqueue_head(&delayed_root->wait);
67307diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
67308index 05fef19..f3774b8 100644
67309--- a/fs/btrfs/super.c
67310+++ b/fs/btrfs/super.c
67311@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
67312 function, line, errstr);
67313 return;
67314 }
67315- ACCESS_ONCE(trans->transaction->aborted) = errno;
67316+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
67317 /* Wake up anybody who may be waiting on this transaction */
67318 wake_up(&root->fs_info->transaction_wait);
67319 wake_up(&root->fs_info->transaction_blocked_wait);
67320diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
67321index 94edb0a..e94dc93 100644
67322--- a/fs/btrfs/sysfs.c
67323+++ b/fs/btrfs/sysfs.c
67324@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
67325 for (set = 0; set < FEAT_MAX; set++) {
67326 int i;
67327 struct attribute *attrs[2];
67328- struct attribute_group agroup = {
67329+ attribute_group_no_const agroup = {
67330 .name = "features",
67331 .attrs = attrs,
67332 };
67333diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
67334index 2299bfd..4098e72 100644
67335--- a/fs/btrfs/tests/free-space-tests.c
67336+++ b/fs/btrfs/tests/free-space-tests.c
67337@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
67338 * extent entry.
67339 */
67340 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
67341- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
67342+ pax_open_kernel();
67343+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
67344+ pax_close_kernel();
67345
67346 /*
67347 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
67348@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
67349 if (ret)
67350 return ret;
67351
67352- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
67353+ pax_open_kernel();
67354+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
67355+ pax_close_kernel();
67356 __btrfs_remove_free_space_cache(cache->free_space_ctl);
67357
67358 return 0;
67359diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
67360index 154990c..d0cf699 100644
67361--- a/fs/btrfs/tree-log.h
67362+++ b/fs/btrfs/tree-log.h
67363@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
67364 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
67365 struct btrfs_trans_handle *trans)
67366 {
67367- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
67368+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
67369 }
67370
67371 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
67372diff --git a/fs/buffer.c b/fs/buffer.c
67373index 20805db..2e8fc69 100644
67374--- a/fs/buffer.c
67375+++ b/fs/buffer.c
67376@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
67377 bh_cachep = kmem_cache_create("buffer_head",
67378 sizeof(struct buffer_head), 0,
67379 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
67380- SLAB_MEM_SPREAD),
67381+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
67382 NULL);
67383
67384 /*
67385diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
67386index fbb08e9..0fda764 100644
67387--- a/fs/cachefiles/bind.c
67388+++ b/fs/cachefiles/bind.c
67389@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
67390 args);
67391
67392 /* start by checking things over */
67393- ASSERT(cache->fstop_percent >= 0 &&
67394- cache->fstop_percent < cache->fcull_percent &&
67395+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
67396 cache->fcull_percent < cache->frun_percent &&
67397 cache->frun_percent < 100);
67398
67399- ASSERT(cache->bstop_percent >= 0 &&
67400- cache->bstop_percent < cache->bcull_percent &&
67401+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
67402 cache->bcull_percent < cache->brun_percent &&
67403 cache->brun_percent < 100);
67404
67405diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
67406index f601def..b2cf704 100644
67407--- a/fs/cachefiles/daemon.c
67408+++ b/fs/cachefiles/daemon.c
67409@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
67410 if (n > buflen)
67411 return -EMSGSIZE;
67412
67413- if (copy_to_user(_buffer, buffer, n) != 0)
67414+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
67415 return -EFAULT;
67416
67417 return n;
67418@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
67419 if (test_bit(CACHEFILES_DEAD, &cache->flags))
67420 return -EIO;
67421
67422- if (datalen < 0 || datalen > PAGE_SIZE - 1)
67423+ if (datalen > PAGE_SIZE - 1)
67424 return -EOPNOTSUPP;
67425
67426 /* drag the command string into the kernel so we can parse it */
67427@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
67428 if (args[0] != '%' || args[1] != '\0')
67429 return -EINVAL;
67430
67431- if (fstop < 0 || fstop >= cache->fcull_percent)
67432+ if (fstop >= cache->fcull_percent)
67433 return cachefiles_daemon_range_error(cache, args);
67434
67435 cache->fstop_percent = fstop;
67436@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
67437 if (args[0] != '%' || args[1] != '\0')
67438 return -EINVAL;
67439
67440- if (bstop < 0 || bstop >= cache->bcull_percent)
67441+ if (bstop >= cache->bcull_percent)
67442 return cachefiles_daemon_range_error(cache, args);
67443
67444 cache->bstop_percent = bstop;
67445diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
67446index 8c52472..c4e3a69 100644
67447--- a/fs/cachefiles/internal.h
67448+++ b/fs/cachefiles/internal.h
67449@@ -66,7 +66,7 @@ struct cachefiles_cache {
67450 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
67451 struct rb_root active_nodes; /* active nodes (can't be culled) */
67452 rwlock_t active_lock; /* lock for active_nodes */
67453- atomic_t gravecounter; /* graveyard uniquifier */
67454+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
67455 unsigned frun_percent; /* when to stop culling (% files) */
67456 unsigned fcull_percent; /* when to start culling (% files) */
67457 unsigned fstop_percent; /* when to stop allocating (% files) */
67458@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
67459 * proc.c
67460 */
67461 #ifdef CONFIG_CACHEFILES_HISTOGRAM
67462-extern atomic_t cachefiles_lookup_histogram[HZ];
67463-extern atomic_t cachefiles_mkdir_histogram[HZ];
67464-extern atomic_t cachefiles_create_histogram[HZ];
67465+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
67466+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
67467+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
67468
67469 extern int __init cachefiles_proc_init(void);
67470 extern void cachefiles_proc_cleanup(void);
67471 static inline
67472-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
67473+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
67474 {
67475 unsigned long jif = jiffies - start_jif;
67476 if (jif >= HZ)
67477 jif = HZ - 1;
67478- atomic_inc(&histogram[jif]);
67479+ atomic_inc_unchecked(&histogram[jif]);
67480 }
67481
67482 #else
67483diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
67484index 1e51714..411eded 100644
67485--- a/fs/cachefiles/namei.c
67486+++ b/fs/cachefiles/namei.c
67487@@ -309,7 +309,7 @@ try_again:
67488 /* first step is to make up a grave dentry in the graveyard */
67489 sprintf(nbuffer, "%08x%08x",
67490 (uint32_t) get_seconds(),
67491- (uint32_t) atomic_inc_return(&cache->gravecounter));
67492+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
67493
67494 /* do the multiway lock magic */
67495 trap = lock_rename(cache->graveyard, dir);
67496diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
67497index eccd339..4c1d995 100644
67498--- a/fs/cachefiles/proc.c
67499+++ b/fs/cachefiles/proc.c
67500@@ -14,9 +14,9 @@
67501 #include <linux/seq_file.h>
67502 #include "internal.h"
67503
67504-atomic_t cachefiles_lookup_histogram[HZ];
67505-atomic_t cachefiles_mkdir_histogram[HZ];
67506-atomic_t cachefiles_create_histogram[HZ];
67507+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
67508+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
67509+atomic_unchecked_t cachefiles_create_histogram[HZ];
67510
67511 /*
67512 * display the latency histogram
67513@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
67514 return 0;
67515 default:
67516 index = (unsigned long) v - 3;
67517- x = atomic_read(&cachefiles_lookup_histogram[index]);
67518- y = atomic_read(&cachefiles_mkdir_histogram[index]);
67519- z = atomic_read(&cachefiles_create_histogram[index]);
67520+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
67521+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
67522+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
67523 if (x == 0 && y == 0 && z == 0)
67524 return 0;
67525
67526diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
67527index 83e9976..bfd1eee 100644
67528--- a/fs/ceph/dir.c
67529+++ b/fs/ceph/dir.c
67530@@ -127,6 +127,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
67531 struct dentry *dentry, *last;
67532 struct ceph_dentry_info *di;
67533 int err = 0;
67534+ char d_name[DNAME_INLINE_LEN];
67535+ const unsigned char *name;
67536
67537 /* claim ref on last dentry we returned */
67538 last = fi->dentry;
67539@@ -190,7 +192,12 @@ more:
67540
67541 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
67542 dentry, dentry, dentry->d_inode);
67543- if (!dir_emit(ctx, dentry->d_name.name,
67544+ name = dentry->d_name.name;
67545+ if (name == dentry->d_iname) {
67546+ memcpy(d_name, name, dentry->d_name.len);
67547+ name = d_name;
67548+ }
67549+ if (!dir_emit(ctx, name,
67550 dentry->d_name.len,
67551 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
67552 dentry->d_inode->i_mode >> 12)) {
67553@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
67554 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
67555 struct ceph_mds_client *mdsc = fsc->mdsc;
67556 unsigned frag = fpos_frag(ctx->pos);
67557- int off = fpos_off(ctx->pos);
67558+ unsigned int off = fpos_off(ctx->pos);
67559 int err;
67560 u32 ftype;
67561 struct ceph_mds_reply_info_parsed *rinfo;
67562diff --git a/fs/ceph/super.c b/fs/ceph/super.c
67563index a63997b..ddc0577 100644
67564--- a/fs/ceph/super.c
67565+++ b/fs/ceph/super.c
67566@@ -889,7 +889,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
67567 /*
67568 * construct our own bdi so we can control readahead, etc.
67569 */
67570-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
67571+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
67572
67573 static int ceph_register_bdi(struct super_block *sb,
67574 struct ceph_fs_client *fsc)
67575@@ -906,7 +906,7 @@ static int ceph_register_bdi(struct super_block *sb,
67576 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
67577
67578 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
67579- atomic_long_inc_return(&bdi_seq));
67580+ atomic_long_inc_return_unchecked(&bdi_seq));
67581 if (!err)
67582 sb->s_bdi = &fsc->backing_dev_info;
67583 return err;
67584diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
67585index 7febcf2..62a5721 100644
67586--- a/fs/cifs/cifs_debug.c
67587+++ b/fs/cifs/cifs_debug.c
67588@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
67589
67590 if (strtobool(&c, &bv) == 0) {
67591 #ifdef CONFIG_CIFS_STATS2
67592- atomic_set(&totBufAllocCount, 0);
67593- atomic_set(&totSmBufAllocCount, 0);
67594+ atomic_set_unchecked(&totBufAllocCount, 0);
67595+ atomic_set_unchecked(&totSmBufAllocCount, 0);
67596 #endif /* CONFIG_CIFS_STATS2 */
67597 spin_lock(&cifs_tcp_ses_lock);
67598 list_for_each(tmp1, &cifs_tcp_ses_list) {
67599@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
67600 tcon = list_entry(tmp3,
67601 struct cifs_tcon,
67602 tcon_list);
67603- atomic_set(&tcon->num_smbs_sent, 0);
67604+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
67605 if (server->ops->clear_stats)
67606 server->ops->clear_stats(tcon);
67607 }
67608@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
67609 smBufAllocCount.counter, cifs_min_small);
67610 #ifdef CONFIG_CIFS_STATS2
67611 seq_printf(m, "Total Large %d Small %d Allocations\n",
67612- atomic_read(&totBufAllocCount),
67613- atomic_read(&totSmBufAllocCount));
67614+ atomic_read_unchecked(&totBufAllocCount),
67615+ atomic_read_unchecked(&totSmBufAllocCount));
67616 #endif /* CONFIG_CIFS_STATS2 */
67617
67618 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
67619@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
67620 if (tcon->need_reconnect)
67621 seq_puts(m, "\tDISCONNECTED ");
67622 seq_printf(m, "\nSMBs: %d",
67623- atomic_read(&tcon->num_smbs_sent));
67624+ atomic_read_unchecked(&tcon->num_smbs_sent));
67625 if (server->ops->print_stats)
67626 server->ops->print_stats(m, tcon);
67627 }
67628diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
67629index d72fe37..ded5511 100644
67630--- a/fs/cifs/cifsfs.c
67631+++ b/fs/cifs/cifsfs.c
67632@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
67633 */
67634 cifs_req_cachep = kmem_cache_create("cifs_request",
67635 CIFSMaxBufSize + max_hdr_size, 0,
67636- SLAB_HWCACHE_ALIGN, NULL);
67637+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
67638 if (cifs_req_cachep == NULL)
67639 return -ENOMEM;
67640
67641@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
67642 efficient to alloc 1 per page off the slab compared to 17K (5page)
67643 alloc of large cifs buffers even when page debugging is on */
67644 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
67645- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
67646+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
67647 NULL);
67648 if (cifs_sm_req_cachep == NULL) {
67649 mempool_destroy(cifs_req_poolp);
67650@@ -1204,8 +1204,8 @@ init_cifs(void)
67651 atomic_set(&bufAllocCount, 0);
67652 atomic_set(&smBufAllocCount, 0);
67653 #ifdef CONFIG_CIFS_STATS2
67654- atomic_set(&totBufAllocCount, 0);
67655- atomic_set(&totSmBufAllocCount, 0);
67656+ atomic_set_unchecked(&totBufAllocCount, 0);
67657+ atomic_set_unchecked(&totSmBufAllocCount, 0);
67658 #endif /* CONFIG_CIFS_STATS2 */
67659
67660 atomic_set(&midCount, 0);
67661diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
67662index 22b289a..bbbba082 100644
67663--- a/fs/cifs/cifsglob.h
67664+++ b/fs/cifs/cifsglob.h
67665@@ -823,35 +823,35 @@ struct cifs_tcon {
67666 __u16 Flags; /* optional support bits */
67667 enum statusEnum tidStatus;
67668 #ifdef CONFIG_CIFS_STATS
67669- atomic_t num_smbs_sent;
67670+ atomic_unchecked_t num_smbs_sent;
67671 union {
67672 struct {
67673- atomic_t num_writes;
67674- atomic_t num_reads;
67675- atomic_t num_flushes;
67676- atomic_t num_oplock_brks;
67677- atomic_t num_opens;
67678- atomic_t num_closes;
67679- atomic_t num_deletes;
67680- atomic_t num_mkdirs;
67681- atomic_t num_posixopens;
67682- atomic_t num_posixmkdirs;
67683- atomic_t num_rmdirs;
67684- atomic_t num_renames;
67685- atomic_t num_t2renames;
67686- atomic_t num_ffirst;
67687- atomic_t num_fnext;
67688- atomic_t num_fclose;
67689- atomic_t num_hardlinks;
67690- atomic_t num_symlinks;
67691- atomic_t num_locks;
67692- atomic_t num_acl_get;
67693- atomic_t num_acl_set;
67694+ atomic_unchecked_t num_writes;
67695+ atomic_unchecked_t num_reads;
67696+ atomic_unchecked_t num_flushes;
67697+ atomic_unchecked_t num_oplock_brks;
67698+ atomic_unchecked_t num_opens;
67699+ atomic_unchecked_t num_closes;
67700+ atomic_unchecked_t num_deletes;
67701+ atomic_unchecked_t num_mkdirs;
67702+ atomic_unchecked_t num_posixopens;
67703+ atomic_unchecked_t num_posixmkdirs;
67704+ atomic_unchecked_t num_rmdirs;
67705+ atomic_unchecked_t num_renames;
67706+ atomic_unchecked_t num_t2renames;
67707+ atomic_unchecked_t num_ffirst;
67708+ atomic_unchecked_t num_fnext;
67709+ atomic_unchecked_t num_fclose;
67710+ atomic_unchecked_t num_hardlinks;
67711+ atomic_unchecked_t num_symlinks;
67712+ atomic_unchecked_t num_locks;
67713+ atomic_unchecked_t num_acl_get;
67714+ atomic_unchecked_t num_acl_set;
67715 } cifs_stats;
67716 #ifdef CONFIG_CIFS_SMB2
67717 struct {
67718- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
67719- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
67720+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
67721+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
67722 } smb2_stats;
67723 #endif /* CONFIG_CIFS_SMB2 */
67724 } stats;
67725@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
67726 }
67727
67728 #ifdef CONFIG_CIFS_STATS
67729-#define cifs_stats_inc atomic_inc
67730+#define cifs_stats_inc atomic_inc_unchecked
67731
67732 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
67733 unsigned int bytes)
67734@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
67735 /* Various Debug counters */
67736 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
67737 #ifdef CONFIG_CIFS_STATS2
67738-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
67739-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
67740+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
67741+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
67742 #endif
67743 GLOBAL_EXTERN atomic_t smBufAllocCount;
67744 GLOBAL_EXTERN atomic_t midCount;
67745diff --git a/fs/cifs/file.c b/fs/cifs/file.c
67746index ca30c39..570fb94 100644
67747--- a/fs/cifs/file.c
67748+++ b/fs/cifs/file.c
67749@@ -2055,10 +2055,14 @@ static int cifs_writepages(struct address_space *mapping,
67750 index = mapping->writeback_index; /* Start from prev offset */
67751 end = -1;
67752 } else {
67753- index = wbc->range_start >> PAGE_CACHE_SHIFT;
67754- end = wbc->range_end >> PAGE_CACHE_SHIFT;
67755- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
67756+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
67757 range_whole = true;
67758+ index = 0;
67759+ end = ULONG_MAX;
67760+ } else {
67761+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
67762+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
67763+ }
67764 scanned = true;
67765 }
67766 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
67767diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
67768index 3379463..3af418a 100644
67769--- a/fs/cifs/misc.c
67770+++ b/fs/cifs/misc.c
67771@@ -170,7 +170,7 @@ cifs_buf_get(void)
67772 memset(ret_buf, 0, buf_size + 3);
67773 atomic_inc(&bufAllocCount);
67774 #ifdef CONFIG_CIFS_STATS2
67775- atomic_inc(&totBufAllocCount);
67776+ atomic_inc_unchecked(&totBufAllocCount);
67777 #endif /* CONFIG_CIFS_STATS2 */
67778 }
67779
67780@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
67781 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
67782 atomic_inc(&smBufAllocCount);
67783 #ifdef CONFIG_CIFS_STATS2
67784- atomic_inc(&totSmBufAllocCount);
67785+ atomic_inc_unchecked(&totSmBufAllocCount);
67786 #endif /* CONFIG_CIFS_STATS2 */
67787
67788 }
67789diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
67790index d297903..1cb7516 100644
67791--- a/fs/cifs/smb1ops.c
67792+++ b/fs/cifs/smb1ops.c
67793@@ -622,27 +622,27 @@ static void
67794 cifs_clear_stats(struct cifs_tcon *tcon)
67795 {
67796 #ifdef CONFIG_CIFS_STATS
67797- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
67798- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
67799- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
67800- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
67801- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
67802- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
67803- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
67804- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
67805- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
67806- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
67807- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
67808- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
67809- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
67810- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
67811- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
67812- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
67813- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
67814- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
67815- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
67816- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
67817- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
67818+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
67819+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
67820+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
67821+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
67822+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
67823+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
67824+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
67825+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
67826+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
67827+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
67828+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
67829+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
67830+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
67831+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
67832+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
67833+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
67834+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
67835+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
67836+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
67837+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
67838+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
67839 #endif
67840 }
67841
67842@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
67843 {
67844 #ifdef CONFIG_CIFS_STATS
67845 seq_printf(m, " Oplocks breaks: %d",
67846- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
67847+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
67848 seq_printf(m, "\nReads: %d Bytes: %llu",
67849- atomic_read(&tcon->stats.cifs_stats.num_reads),
67850+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
67851 (long long)(tcon->bytes_read));
67852 seq_printf(m, "\nWrites: %d Bytes: %llu",
67853- atomic_read(&tcon->stats.cifs_stats.num_writes),
67854+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
67855 (long long)(tcon->bytes_written));
67856 seq_printf(m, "\nFlushes: %d",
67857- atomic_read(&tcon->stats.cifs_stats.num_flushes));
67858+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
67859 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
67860- atomic_read(&tcon->stats.cifs_stats.num_locks),
67861- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
67862- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
67863+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
67864+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
67865+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
67866 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
67867- atomic_read(&tcon->stats.cifs_stats.num_opens),
67868- atomic_read(&tcon->stats.cifs_stats.num_closes),
67869- atomic_read(&tcon->stats.cifs_stats.num_deletes));
67870+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
67871+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
67872+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
67873 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
67874- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
67875- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
67876+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
67877+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
67878 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
67879- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
67880- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
67881+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
67882+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
67883 seq_printf(m, "\nRenames: %d T2 Renames %d",
67884- atomic_read(&tcon->stats.cifs_stats.num_renames),
67885- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
67886+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
67887+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
67888 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
67889- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
67890- atomic_read(&tcon->stats.cifs_stats.num_fnext),
67891- atomic_read(&tcon->stats.cifs_stats.num_fclose));
67892+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
67893+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
67894+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
67895 #endif
67896 }
67897
67898diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
67899index eab05e1..ffe5ea4 100644
67900--- a/fs/cifs/smb2ops.c
67901+++ b/fs/cifs/smb2ops.c
67902@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
67903 #ifdef CONFIG_CIFS_STATS
67904 int i;
67905 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
67906- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
67907- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
67908+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
67909+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
67910 }
67911 #endif
67912 }
67913@@ -459,65 +459,65 @@ static void
67914 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
67915 {
67916 #ifdef CONFIG_CIFS_STATS
67917- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
67918- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
67919+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
67920+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
67921 seq_printf(m, "\nNegotiates: %d sent %d failed",
67922- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
67923- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
67924+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
67925+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
67926 seq_printf(m, "\nSessionSetups: %d sent %d failed",
67927- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
67928- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
67929+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
67930+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
67931 seq_printf(m, "\nLogoffs: %d sent %d failed",
67932- atomic_read(&sent[SMB2_LOGOFF_HE]),
67933- atomic_read(&failed[SMB2_LOGOFF_HE]));
67934+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
67935+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
67936 seq_printf(m, "\nTreeConnects: %d sent %d failed",
67937- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
67938- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
67939+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
67940+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
67941 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
67942- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
67943- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
67944+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
67945+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
67946 seq_printf(m, "\nCreates: %d sent %d failed",
67947- atomic_read(&sent[SMB2_CREATE_HE]),
67948- atomic_read(&failed[SMB2_CREATE_HE]));
67949+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
67950+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
67951 seq_printf(m, "\nCloses: %d sent %d failed",
67952- atomic_read(&sent[SMB2_CLOSE_HE]),
67953- atomic_read(&failed[SMB2_CLOSE_HE]));
67954+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
67955+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
67956 seq_printf(m, "\nFlushes: %d sent %d failed",
67957- atomic_read(&sent[SMB2_FLUSH_HE]),
67958- atomic_read(&failed[SMB2_FLUSH_HE]));
67959+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
67960+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
67961 seq_printf(m, "\nReads: %d sent %d failed",
67962- atomic_read(&sent[SMB2_READ_HE]),
67963- atomic_read(&failed[SMB2_READ_HE]));
67964+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
67965+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
67966 seq_printf(m, "\nWrites: %d sent %d failed",
67967- atomic_read(&sent[SMB2_WRITE_HE]),
67968- atomic_read(&failed[SMB2_WRITE_HE]));
67969+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
67970+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
67971 seq_printf(m, "\nLocks: %d sent %d failed",
67972- atomic_read(&sent[SMB2_LOCK_HE]),
67973- atomic_read(&failed[SMB2_LOCK_HE]));
67974+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
67975+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
67976 seq_printf(m, "\nIOCTLs: %d sent %d failed",
67977- atomic_read(&sent[SMB2_IOCTL_HE]),
67978- atomic_read(&failed[SMB2_IOCTL_HE]));
67979+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
67980+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
67981 seq_printf(m, "\nCancels: %d sent %d failed",
67982- atomic_read(&sent[SMB2_CANCEL_HE]),
67983- atomic_read(&failed[SMB2_CANCEL_HE]));
67984+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
67985+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
67986 seq_printf(m, "\nEchos: %d sent %d failed",
67987- atomic_read(&sent[SMB2_ECHO_HE]),
67988- atomic_read(&failed[SMB2_ECHO_HE]));
67989+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
67990+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
67991 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
67992- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
67993- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
67994+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
67995+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
67996 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
67997- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
67998- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
67999+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
68000+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
68001 seq_printf(m, "\nQueryInfos: %d sent %d failed",
68002- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
68003- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
68004+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
68005+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
68006 seq_printf(m, "\nSetInfos: %d sent %d failed",
68007- atomic_read(&sent[SMB2_SET_INFO_HE]),
68008- atomic_read(&failed[SMB2_SET_INFO_HE]));
68009+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
68010+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
68011 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
68012- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
68013- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
68014+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
68015+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
68016 #endif
68017 }
68018
68019diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
68020index 65cd7a8..3518676 100644
68021--- a/fs/cifs/smb2pdu.c
68022+++ b/fs/cifs/smb2pdu.c
68023@@ -2147,8 +2147,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
68024 default:
68025 cifs_dbg(VFS, "info level %u isn't supported\n",
68026 srch_inf->info_level);
68027- rc = -EINVAL;
68028- goto qdir_exit;
68029+ return -EINVAL;
68030 }
68031
68032 req->FileIndex = cpu_to_le32(index);
68033diff --git a/fs/coda/cache.c b/fs/coda/cache.c
68034index 46ee6f2..89a9e7f 100644
68035--- a/fs/coda/cache.c
68036+++ b/fs/coda/cache.c
68037@@ -24,7 +24,7 @@
68038 #include "coda_linux.h"
68039 #include "coda_cache.h"
68040
68041-static atomic_t permission_epoch = ATOMIC_INIT(0);
68042+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
68043
68044 /* replace or extend an acl cache hit */
68045 void coda_cache_enter(struct inode *inode, int mask)
68046@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
68047 struct coda_inode_info *cii = ITOC(inode);
68048
68049 spin_lock(&cii->c_lock);
68050- cii->c_cached_epoch = atomic_read(&permission_epoch);
68051+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
68052 if (!uid_eq(cii->c_uid, current_fsuid())) {
68053 cii->c_uid = current_fsuid();
68054 cii->c_cached_perm = mask;
68055@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
68056 {
68057 struct coda_inode_info *cii = ITOC(inode);
68058 spin_lock(&cii->c_lock);
68059- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
68060+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
68061 spin_unlock(&cii->c_lock);
68062 }
68063
68064 /* remove all acl caches */
68065 void coda_cache_clear_all(struct super_block *sb)
68066 {
68067- atomic_inc(&permission_epoch);
68068+ atomic_inc_unchecked(&permission_epoch);
68069 }
68070
68071
68072@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
68073 spin_lock(&cii->c_lock);
68074 hit = (mask & cii->c_cached_perm) == mask &&
68075 uid_eq(cii->c_uid, current_fsuid()) &&
68076- cii->c_cached_epoch == atomic_read(&permission_epoch);
68077+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
68078 spin_unlock(&cii->c_lock);
68079
68080 return hit;
68081diff --git a/fs/compat.c b/fs/compat.c
68082index 6fd272d..dd34ba2 100644
68083--- a/fs/compat.c
68084+++ b/fs/compat.c
68085@@ -54,7 +54,7 @@
68086 #include <asm/ioctls.h>
68087 #include "internal.h"
68088
68089-int compat_log = 1;
68090+int compat_log = 0;
68091
68092 int compat_printk(const char *fmt, ...)
68093 {
68094@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
68095
68096 set_fs(KERNEL_DS);
68097 /* The __user pointer cast is valid because of the set_fs() */
68098- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
68099+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
68100 set_fs(oldfs);
68101 /* truncating is ok because it's a user address */
68102 if (!ret)
68103@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
68104 goto out;
68105
68106 ret = -EINVAL;
68107- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
68108+ if (nr_segs > UIO_MAXIOV)
68109 goto out;
68110 if (nr_segs > fast_segs) {
68111 ret = -ENOMEM;
68112@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
68113 struct compat_readdir_callback {
68114 struct dir_context ctx;
68115 struct compat_old_linux_dirent __user *dirent;
68116+ struct file * file;
68117 int result;
68118 };
68119
68120@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
68121 buf->result = -EOVERFLOW;
68122 return -EOVERFLOW;
68123 }
68124+
68125+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68126+ return 0;
68127+
68128 buf->result++;
68129 dirent = buf->dirent;
68130 if (!access_ok(VERIFY_WRITE, dirent,
68131@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68132 if (!f.file)
68133 return -EBADF;
68134
68135+ buf.file = f.file;
68136 error = iterate_dir(f.file, &buf.ctx);
68137 if (buf.result)
68138 error = buf.result;
68139@@ -913,6 +919,7 @@ struct compat_getdents_callback {
68140 struct dir_context ctx;
68141 struct compat_linux_dirent __user *current_dir;
68142 struct compat_linux_dirent __user *previous;
68143+ struct file * file;
68144 int count;
68145 int error;
68146 };
68147@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
68148 buf->error = -EOVERFLOW;
68149 return -EOVERFLOW;
68150 }
68151+
68152+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68153+ return 0;
68154+
68155 dirent = buf->previous;
68156 if (dirent) {
68157 if (__put_user(offset, &dirent->d_off))
68158@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
68159 if (!f.file)
68160 return -EBADF;
68161
68162+ buf.file = f.file;
68163 error = iterate_dir(f.file, &buf.ctx);
68164 if (error >= 0)
68165 error = buf.error;
68166@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
68167 struct dir_context ctx;
68168 struct linux_dirent64 __user *current_dir;
68169 struct linux_dirent64 __user *previous;
68170+ struct file * file;
68171 int count;
68172 int error;
68173 };
68174@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
68175 buf->error = -EINVAL; /* only used if we fail.. */
68176 if (reclen > buf->count)
68177 return -EINVAL;
68178+
68179+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68180+ return 0;
68181+
68182 dirent = buf->previous;
68183
68184 if (dirent) {
68185@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68186 if (!f.file)
68187 return -EBADF;
68188
68189+ buf.file = f.file;
68190 error = iterate_dir(f.file, &buf.ctx);
68191 if (error >= 0)
68192 error = buf.error;
68193diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
68194index 4d24d17..4f8c09e 100644
68195--- a/fs/compat_binfmt_elf.c
68196+++ b/fs/compat_binfmt_elf.c
68197@@ -30,11 +30,13 @@
68198 #undef elf_phdr
68199 #undef elf_shdr
68200 #undef elf_note
68201+#undef elf_dyn
68202 #undef elf_addr_t
68203 #define elfhdr elf32_hdr
68204 #define elf_phdr elf32_phdr
68205 #define elf_shdr elf32_shdr
68206 #define elf_note elf32_note
68207+#define elf_dyn Elf32_Dyn
68208 #define elf_addr_t Elf32_Addr
68209
68210 /*
68211diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
68212index afec645..9c65620 100644
68213--- a/fs/compat_ioctl.c
68214+++ b/fs/compat_ioctl.c
68215@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
68216 return -EFAULT;
68217 if (__get_user(udata, &ss32->iomem_base))
68218 return -EFAULT;
68219- ss.iomem_base = compat_ptr(udata);
68220+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
68221 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
68222 __get_user(ss.port_high, &ss32->port_high))
68223 return -EFAULT;
68224@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
68225 for (i = 0; i < nmsgs; i++) {
68226 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
68227 return -EFAULT;
68228- if (get_user(datap, &umsgs[i].buf) ||
68229- put_user(compat_ptr(datap), &tmsgs[i].buf))
68230+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
68231+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
68232 return -EFAULT;
68233 }
68234 return sys_ioctl(fd, cmd, (unsigned long)tdata);
68235@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
68236 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
68237 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
68238 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
68239- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
68240+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
68241 return -EFAULT;
68242
68243 return ioctl_preallocate(file, p);
68244@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
68245 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
68246 {
68247 unsigned int a, b;
68248- a = *(unsigned int *)p;
68249- b = *(unsigned int *)q;
68250+ a = *(const unsigned int *)p;
68251+ b = *(const unsigned int *)q;
68252 if (a > b)
68253 return 1;
68254 if (a < b)
68255diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
68256index cf0db00..c7f70e8 100644
68257--- a/fs/configfs/dir.c
68258+++ b/fs/configfs/dir.c
68259@@ -1540,7 +1540,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
68260 }
68261 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
68262 struct configfs_dirent *next;
68263- const char *name;
68264+ const unsigned char * name;
68265+ char d_name[sizeof(next->s_dentry->d_iname)];
68266 int len;
68267 struct inode *inode = NULL;
68268
68269@@ -1549,7 +1550,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
68270 continue;
68271
68272 name = configfs_get_name(next);
68273- len = strlen(name);
68274+ if (next->s_dentry && name == next->s_dentry->d_iname) {
68275+ len = next->s_dentry->d_name.len;
68276+ memcpy(d_name, name, len);
68277+ name = d_name;
68278+ } else
68279+ len = strlen(name);
68280
68281 /*
68282 * We'll have a dentry and an inode for
68283diff --git a/fs/coredump.c b/fs/coredump.c
68284index bbbe139..b76fae5 100644
68285--- a/fs/coredump.c
68286+++ b/fs/coredump.c
68287@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
68288 struct pipe_inode_info *pipe = file->private_data;
68289
68290 pipe_lock(pipe);
68291- pipe->readers++;
68292- pipe->writers--;
68293+ atomic_inc(&pipe->readers);
68294+ atomic_dec(&pipe->writers);
68295 wake_up_interruptible_sync(&pipe->wait);
68296 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
68297 pipe_unlock(pipe);
68298@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
68299 * We actually want wait_event_freezable() but then we need
68300 * to clear TIF_SIGPENDING and improve dump_interrupted().
68301 */
68302- wait_event_interruptible(pipe->wait, pipe->readers == 1);
68303+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
68304
68305 pipe_lock(pipe);
68306- pipe->readers--;
68307- pipe->writers++;
68308+ atomic_dec(&pipe->readers);
68309+ atomic_inc(&pipe->writers);
68310 pipe_unlock(pipe);
68311 }
68312
68313@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
68314 struct files_struct *displaced;
68315 bool need_nonrelative = false;
68316 bool core_dumped = false;
68317- static atomic_t core_dump_count = ATOMIC_INIT(0);
68318+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
68319+ long signr = siginfo->si_signo;
68320+ int dumpable;
68321 struct coredump_params cprm = {
68322 .siginfo = siginfo,
68323 .regs = signal_pt_regs(),
68324@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
68325 .mm_flags = mm->flags,
68326 };
68327
68328- audit_core_dumps(siginfo->si_signo);
68329+ audit_core_dumps(signr);
68330+
68331+ dumpable = __get_dumpable(cprm.mm_flags);
68332+
68333+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
68334+ gr_handle_brute_attach(dumpable);
68335
68336 binfmt = mm->binfmt;
68337 if (!binfmt || !binfmt->core_dump)
68338 goto fail;
68339- if (!__get_dumpable(cprm.mm_flags))
68340+ if (!dumpable)
68341 goto fail;
68342
68343 cred = prepare_creds();
68344@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
68345 need_nonrelative = true;
68346 }
68347
68348- retval = coredump_wait(siginfo->si_signo, &core_state);
68349+ retval = coredump_wait(signr, &core_state);
68350 if (retval < 0)
68351 goto fail_creds;
68352
68353@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
68354 }
68355 cprm.limit = RLIM_INFINITY;
68356
68357- dump_count = atomic_inc_return(&core_dump_count);
68358+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
68359 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
68360 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
68361 task_tgid_vnr(current), current->comm);
68362@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
68363 } else {
68364 struct inode *inode;
68365
68366+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
68367+
68368 if (cprm.limit < binfmt->min_coredump)
68369 goto fail_unlock;
68370
68371@@ -681,7 +690,7 @@ close_fail:
68372 filp_close(cprm.file, NULL);
68373 fail_dropcount:
68374 if (ispipe)
68375- atomic_dec(&core_dump_count);
68376+ atomic_dec_unchecked(&core_dump_count);
68377 fail_unlock:
68378 kfree(cn.corename);
68379 coredump_finish(mm, core_dumped);
68380@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
68381 struct file *file = cprm->file;
68382 loff_t pos = file->f_pos;
68383 ssize_t n;
68384+
68385+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
68386 if (cprm->written + nr > cprm->limit)
68387 return 0;
68388 while (nr) {
68389diff --git a/fs/dcache.c b/fs/dcache.c
68390index c71e373..05e38ae 100644
68391--- a/fs/dcache.c
68392+++ b/fs/dcache.c
68393@@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry)
68394 * dentry_iput drops the locks, at which point nobody (except
68395 * transient RCU lookups) can reach this dentry.
68396 */
68397- BUG_ON(dentry->d_lockref.count > 0);
68398+ BUG_ON(__lockref_read(&dentry->d_lockref) > 0);
68399 this_cpu_dec(nr_dentry);
68400 if (dentry->d_op && dentry->d_op->d_release)
68401 dentry->d_op->d_release(dentry);
68402@@ -564,7 +564,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
68403 struct dentry *parent = dentry->d_parent;
68404 if (IS_ROOT(dentry))
68405 return NULL;
68406- if (unlikely(dentry->d_lockref.count < 0))
68407+ if (unlikely(__lockref_read(&dentry->d_lockref) < 0))
68408 return NULL;
68409 if (likely(spin_trylock(&parent->d_lock)))
68410 return parent;
68411@@ -626,8 +626,8 @@ static inline bool fast_dput(struct dentry *dentry)
68412 */
68413 if (unlikely(ret < 0)) {
68414 spin_lock(&dentry->d_lock);
68415- if (dentry->d_lockref.count > 1) {
68416- dentry->d_lockref.count--;
68417+ if (__lockref_read(&dentry->d_lockref) > 1) {
68418+ __lockref_dec(&dentry->d_lockref);
68419 spin_unlock(&dentry->d_lock);
68420 return 1;
68421 }
68422@@ -682,7 +682,7 @@ static inline bool fast_dput(struct dentry *dentry)
68423 * else could have killed it and marked it dead. Either way, we
68424 * don't need to do anything else.
68425 */
68426- if (dentry->d_lockref.count) {
68427+ if (__lockref_read(&dentry->d_lockref)) {
68428 spin_unlock(&dentry->d_lock);
68429 return 1;
68430 }
68431@@ -692,7 +692,7 @@ static inline bool fast_dput(struct dentry *dentry)
68432 * lock, and we just tested that it was zero, so we can just
68433 * set it to 1.
68434 */
68435- dentry->d_lockref.count = 1;
68436+ __lockref_set(&dentry->d_lockref, 1);
68437 return 0;
68438 }
68439
68440@@ -751,7 +751,7 @@ repeat:
68441 dentry->d_flags |= DCACHE_REFERENCED;
68442 dentry_lru_add(dentry);
68443
68444- dentry->d_lockref.count--;
68445+ __lockref_dec(&dentry->d_lockref);
68446 spin_unlock(&dentry->d_lock);
68447 return;
68448
68449@@ -766,7 +766,7 @@ EXPORT_SYMBOL(dput);
68450 /* This must be called with d_lock held */
68451 static inline void __dget_dlock(struct dentry *dentry)
68452 {
68453- dentry->d_lockref.count++;
68454+ __lockref_inc(&dentry->d_lockref);
68455 }
68456
68457 static inline void __dget(struct dentry *dentry)
68458@@ -807,8 +807,8 @@ repeat:
68459 goto repeat;
68460 }
68461 rcu_read_unlock();
68462- BUG_ON(!ret->d_lockref.count);
68463- ret->d_lockref.count++;
68464+ BUG_ON(!__lockref_read(&ret->d_lockref));
68465+ __lockref_inc(&ret->d_lockref);
68466 spin_unlock(&ret->d_lock);
68467 return ret;
68468 }
68469@@ -886,9 +886,9 @@ restart:
68470 spin_lock(&inode->i_lock);
68471 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
68472 spin_lock(&dentry->d_lock);
68473- if (!dentry->d_lockref.count) {
68474+ if (!__lockref_read(&dentry->d_lockref)) {
68475 struct dentry *parent = lock_parent(dentry);
68476- if (likely(!dentry->d_lockref.count)) {
68477+ if (likely(!__lockref_read(&dentry->d_lockref))) {
68478 __dentry_kill(dentry);
68479 dput(parent);
68480 goto restart;
68481@@ -923,7 +923,7 @@ static void shrink_dentry_list(struct list_head *list)
68482 * We found an inuse dentry which was not removed from
68483 * the LRU because of laziness during lookup. Do not free it.
68484 */
68485- if (dentry->d_lockref.count > 0) {
68486+ if (__lockref_read(&dentry->d_lockref) > 0) {
68487 spin_unlock(&dentry->d_lock);
68488 if (parent)
68489 spin_unlock(&parent->d_lock);
68490@@ -961,8 +961,8 @@ static void shrink_dentry_list(struct list_head *list)
68491 dentry = parent;
68492 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
68493 parent = lock_parent(dentry);
68494- if (dentry->d_lockref.count != 1) {
68495- dentry->d_lockref.count--;
68496+ if (__lockref_read(&dentry->d_lockref) != 1) {
68497+ __lockref_inc(&dentry->d_lockref);
68498 spin_unlock(&dentry->d_lock);
68499 if (parent)
68500 spin_unlock(&parent->d_lock);
68501@@ -1002,7 +1002,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
68502 * counts, just remove them from the LRU. Otherwise give them
68503 * another pass through the LRU.
68504 */
68505- if (dentry->d_lockref.count) {
68506+ if (__lockref_read(&dentry->d_lockref)) {
68507 d_lru_isolate(lru, dentry);
68508 spin_unlock(&dentry->d_lock);
68509 return LRU_REMOVED;
68510@@ -1205,13 +1205,13 @@ ascend:
68511 /* might go back up the wrong parent if we have had a rename. */
68512 if (need_seqretry(&rename_lock, seq))
68513 goto rename_retry;
68514- next = child->d_child.next;
68515- while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
68516+ /* go into the first sibling still alive */
68517+ do {
68518+ next = child->d_child.next;
68519 if (next == &this_parent->d_subdirs)
68520 goto ascend;
68521 child = list_entry(next, struct dentry, d_child);
68522- next = next->next;
68523- }
68524+ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
68525 rcu_read_unlock();
68526 goto resume;
68527 }
68528@@ -1336,7 +1336,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
68529 } else {
68530 if (dentry->d_flags & DCACHE_LRU_LIST)
68531 d_lru_del(dentry);
68532- if (!dentry->d_lockref.count) {
68533+ if (!__lockref_read(&dentry->d_lockref)) {
68534 d_shrink_add(dentry, &data->dispose);
68535 data->found++;
68536 }
68537@@ -1384,7 +1384,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
68538 return D_WALK_CONTINUE;
68539
68540 /* root with refcount 1 is fine */
68541- if (dentry == _data && dentry->d_lockref.count == 1)
68542+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
68543 return D_WALK_CONTINUE;
68544
68545 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
68546@@ -1393,7 +1393,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
68547 dentry->d_inode ?
68548 dentry->d_inode->i_ino : 0UL,
68549 dentry,
68550- dentry->d_lockref.count,
68551+ __lockref_read(&dentry->d_lockref),
68552 dentry->d_sb->s_type->name,
68553 dentry->d_sb->s_id);
68554 WARN_ON(1);
68555@@ -1534,7 +1534,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
68556 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
68557 if (name->len > DNAME_INLINE_LEN-1) {
68558 size_t size = offsetof(struct external_name, name[1]);
68559- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
68560+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
68561 if (!p) {
68562 kmem_cache_free(dentry_cache, dentry);
68563 return NULL;
68564@@ -1557,7 +1557,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
68565 smp_wmb();
68566 dentry->d_name.name = dname;
68567
68568- dentry->d_lockref.count = 1;
68569+ __lockref_set(&dentry->d_lockref, 1);
68570 dentry->d_flags = 0;
68571 spin_lock_init(&dentry->d_lock);
68572 seqcount_init(&dentry->d_seq);
68573@@ -1566,6 +1566,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
68574 dentry->d_sb = sb;
68575 dentry->d_op = NULL;
68576 dentry->d_fsdata = NULL;
68577+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
68578+ atomic_set(&dentry->chroot_refcnt, 0);
68579+#endif
68580 INIT_HLIST_BL_NODE(&dentry->d_hash);
68581 INIT_LIST_HEAD(&dentry->d_lru);
68582 INIT_LIST_HEAD(&dentry->d_subdirs);
68583@@ -2290,7 +2293,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
68584 goto next;
68585 }
68586
68587- dentry->d_lockref.count++;
68588+ __lockref_inc(&dentry->d_lockref);
68589 found = dentry;
68590 spin_unlock(&dentry->d_lock);
68591 break;
68592@@ -2358,7 +2361,7 @@ again:
68593 spin_lock(&dentry->d_lock);
68594 inode = dentry->d_inode;
68595 isdir = S_ISDIR(inode->i_mode);
68596- if (dentry->d_lockref.count == 1) {
68597+ if (__lockref_read(&dentry->d_lockref) == 1) {
68598 if (!spin_trylock(&inode->i_lock)) {
68599 spin_unlock(&dentry->d_lock);
68600 cpu_relax();
68601@@ -3311,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
68602
68603 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
68604 dentry->d_flags |= DCACHE_GENOCIDE;
68605- dentry->d_lockref.count--;
68606+ __lockref_dec(&dentry->d_lockref);
68607 }
68608 }
68609 return D_WALK_CONTINUE;
68610@@ -3427,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
68611 mempages -= reserve;
68612
68613 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
68614- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
68615+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
68616+ SLAB_NO_SANITIZE, NULL);
68617
68618 dcache_init();
68619 inode_init();
68620diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
68621index 96400ab..906103d 100644
68622--- a/fs/debugfs/inode.c
68623+++ b/fs/debugfs/inode.c
68624@@ -386,6 +386,10 @@ struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
68625 }
68626 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
68627
68628+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68629+extern int grsec_enable_sysfs_restrict;
68630+#endif
68631+
68632 /**
68633 * debugfs_create_dir - create a directory in the debugfs filesystem
68634 * @name: a pointer to a string containing the name of the directory to
68635@@ -404,6 +408,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
68636 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
68637 * returned.
68638 */
68639+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68640+extern int grsec_enable_sysfs_restrict;
68641+#endif
68642+
68643 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
68644 {
68645 struct dentry *dentry = start_creating(name, parent);
68646@@ -416,7 +424,12 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
68647 if (unlikely(!inode))
68648 return failed_creating(dentry);
68649
68650- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
68651+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68652+ if (grsec_enable_sysfs_restrict)
68653+ inode->i_mode = S_IFDIR | S_IRWXU;
68654+ else
68655+#endif
68656+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
68657 inode->i_op = &simple_dir_inode_operations;
68658 inode->i_fop = &simple_dir_operations;
68659
68660diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
68661index b08b518..d6acffa 100644
68662--- a/fs/ecryptfs/inode.c
68663+++ b/fs/ecryptfs/inode.c
68664@@ -663,7 +663,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
68665 old_fs = get_fs();
68666 set_fs(get_ds());
68667 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
68668- (char __user *)lower_buf,
68669+ (char __force_user *)lower_buf,
68670 PATH_MAX);
68671 set_fs(old_fs);
68672 if (rc < 0)
68673diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
68674index e4141f2..d8263e8 100644
68675--- a/fs/ecryptfs/miscdev.c
68676+++ b/fs/ecryptfs/miscdev.c
68677@@ -304,7 +304,7 @@ check_list:
68678 goto out_unlock_msg_ctx;
68679 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
68680 if (msg_ctx->msg) {
68681- if (copy_to_user(&buf[i], packet_length, packet_length_size))
68682+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
68683 goto out_unlock_msg_ctx;
68684 i += packet_length_size;
68685 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
68686diff --git a/fs/exec.c b/fs/exec.c
68687index 00400cf..b9d927b 100644
68688--- a/fs/exec.c
68689+++ b/fs/exec.c
68690@@ -56,8 +56,20 @@
68691 #include <linux/pipe_fs_i.h>
68692 #include <linux/oom.h>
68693 #include <linux/compat.h>
68694+#include <linux/random.h>
68695+#include <linux/seq_file.h>
68696+#include <linux/coredump.h>
68697+#include <linux/mman.h>
68698+
68699+#ifdef CONFIG_PAX_REFCOUNT
68700+#include <linux/kallsyms.h>
68701+#include <linux/kdebug.h>
68702+#endif
68703+
68704+#include <trace/events/fs.h>
68705
68706 #include <asm/uaccess.h>
68707+#include <asm/sections.h>
68708 #include <asm/mmu_context.h>
68709 #include <asm/tlb.h>
68710
68711@@ -66,19 +78,34 @@
68712
68713 #include <trace/events/sched.h>
68714
68715+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68716+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
68717+{
68718+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
68719+}
68720+#endif
68721+
68722+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
68723+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68724+EXPORT_SYMBOL(pax_set_initial_flags_func);
68725+#endif
68726+
68727 int suid_dumpable = 0;
68728
68729 static LIST_HEAD(formats);
68730 static DEFINE_RWLOCK(binfmt_lock);
68731
68732+extern int gr_process_kernel_exec_ban(void);
68733+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
68734+
68735 void __register_binfmt(struct linux_binfmt * fmt, int insert)
68736 {
68737 BUG_ON(!fmt);
68738 if (WARN_ON(!fmt->load_binary))
68739 return;
68740 write_lock(&binfmt_lock);
68741- insert ? list_add(&fmt->lh, &formats) :
68742- list_add_tail(&fmt->lh, &formats);
68743+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
68744+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
68745 write_unlock(&binfmt_lock);
68746 }
68747
68748@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
68749 void unregister_binfmt(struct linux_binfmt * fmt)
68750 {
68751 write_lock(&binfmt_lock);
68752- list_del(&fmt->lh);
68753+ pax_list_del((struct list_head *)&fmt->lh);
68754 write_unlock(&binfmt_lock);
68755 }
68756
68757@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
68758 int write)
68759 {
68760 struct page *page;
68761- int ret;
68762
68763-#ifdef CONFIG_STACK_GROWSUP
68764- if (write) {
68765- ret = expand_downwards(bprm->vma, pos);
68766- if (ret < 0)
68767- return NULL;
68768- }
68769-#endif
68770- ret = get_user_pages(current, bprm->mm, pos,
68771- 1, write, 1, &page, NULL);
68772- if (ret <= 0)
68773+ if (0 > expand_downwards(bprm->vma, pos))
68774+ return NULL;
68775+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
68776 return NULL;
68777
68778 if (write) {
68779@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
68780 if (size <= ARG_MAX)
68781 return page;
68782
68783+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68784+ // only allow 512KB for argv+env on suid/sgid binaries
68785+ // to prevent easy ASLR exhaustion
68786+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
68787+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
68788+ (size > (512 * 1024))) {
68789+ put_page(page);
68790+ return NULL;
68791+ }
68792+#endif
68793+
68794 /*
68795 * Limit to 1/4-th the stack size for the argv+env strings.
68796 * This ensures that:
68797@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
68798 vma->vm_end = STACK_TOP_MAX;
68799 vma->vm_start = vma->vm_end - PAGE_SIZE;
68800 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
68801+
68802+#ifdef CONFIG_PAX_SEGMEXEC
68803+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68804+#endif
68805+
68806 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68807 INIT_LIST_HEAD(&vma->anon_vma_chain);
68808
68809@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
68810 arch_bprm_mm_init(mm, vma);
68811 up_write(&mm->mmap_sem);
68812 bprm->p = vma->vm_end - sizeof(void *);
68813+
68814+#ifdef CONFIG_PAX_RANDUSTACK
68815+ if (randomize_va_space)
68816+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
68817+#endif
68818+
68819 return 0;
68820 err:
68821 up_write(&mm->mmap_sem);
68822@@ -396,7 +437,7 @@ struct user_arg_ptr {
68823 } ptr;
68824 };
68825
68826-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
68827+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
68828 {
68829 const char __user *native;
68830
68831@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
68832 compat_uptr_t compat;
68833
68834 if (get_user(compat, argv.ptr.compat + nr))
68835- return ERR_PTR(-EFAULT);
68836+ return (const char __force_user *)ERR_PTR(-EFAULT);
68837
68838 return compat_ptr(compat);
68839 }
68840 #endif
68841
68842 if (get_user(native, argv.ptr.native + nr))
68843- return ERR_PTR(-EFAULT);
68844+ return (const char __force_user *)ERR_PTR(-EFAULT);
68845
68846 return native;
68847 }
68848@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
68849 if (!p)
68850 break;
68851
68852- if (IS_ERR(p))
68853+ if (IS_ERR((const char __force_kernel *)p))
68854 return -EFAULT;
68855
68856 if (i >= max)
68857@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
68858
68859 ret = -EFAULT;
68860 str = get_user_arg_ptr(argv, argc);
68861- if (IS_ERR(str))
68862+ if (IS_ERR((const char __force_kernel *)str))
68863 goto out;
68864
68865 len = strnlen_user(str, MAX_ARG_STRLEN);
68866@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
68867 int r;
68868 mm_segment_t oldfs = get_fs();
68869 struct user_arg_ptr argv = {
68870- .ptr.native = (const char __user *const __user *)__argv,
68871+ .ptr.native = (const char __user * const __force_user *)__argv,
68872 };
68873
68874 set_fs(KERNEL_DS);
68875@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
68876 unsigned long new_end = old_end - shift;
68877 struct mmu_gather tlb;
68878
68879- BUG_ON(new_start > new_end);
68880+ if (new_start >= new_end || new_start < mmap_min_addr)
68881+ return -ENOMEM;
68882
68883 /*
68884 * ensure there are no vmas between where we want to go
68885@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
68886 if (vma != find_vma(mm, new_start))
68887 return -EFAULT;
68888
68889+#ifdef CONFIG_PAX_SEGMEXEC
68890+ BUG_ON(pax_find_mirror_vma(vma));
68891+#endif
68892+
68893 /*
68894 * cover the whole range: [new_start, old_end)
68895 */
68896@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
68897 stack_top = arch_align_stack(stack_top);
68898 stack_top = PAGE_ALIGN(stack_top);
68899
68900- if (unlikely(stack_top < mmap_min_addr) ||
68901- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
68902- return -ENOMEM;
68903-
68904 stack_shift = vma->vm_end - stack_top;
68905
68906 bprm->p -= stack_shift;
68907@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
68908 bprm->exec -= stack_shift;
68909
68910 down_write(&mm->mmap_sem);
68911+
68912+ /* Move stack pages down in memory. */
68913+ if (stack_shift) {
68914+ ret = shift_arg_pages(vma, stack_shift);
68915+ if (ret)
68916+ goto out_unlock;
68917+ }
68918+
68919 vm_flags = VM_STACK_FLAGS;
68920
68921+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
68922+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
68923+ vm_flags &= ~VM_EXEC;
68924+
68925+#ifdef CONFIG_PAX_MPROTECT
68926+ if (mm->pax_flags & MF_PAX_MPROTECT)
68927+ vm_flags &= ~VM_MAYEXEC;
68928+#endif
68929+
68930+ }
68931+#endif
68932+
68933 /*
68934 * Adjust stack execute permissions; explicitly enable for
68935 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
68936@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
68937 goto out_unlock;
68938 BUG_ON(prev != vma);
68939
68940- /* Move stack pages down in memory. */
68941- if (stack_shift) {
68942- ret = shift_arg_pages(vma, stack_shift);
68943- if (ret)
68944- goto out_unlock;
68945- }
68946-
68947 /* mprotect_fixup is overkill to remove the temporary stack flags */
68948 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
68949
68950@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
68951 #endif
68952 current->mm->start_stack = bprm->p;
68953 ret = expand_stack(vma, stack_base);
68954+
68955+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
68956+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
68957+ unsigned long size;
68958+ vm_flags_t vm_flags;
68959+
68960+ size = STACK_TOP - vma->vm_end;
68961+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
68962+
68963+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
68964+
68965+#ifdef CONFIG_X86
68966+ if (!ret) {
68967+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
68968+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
68969+ }
68970+#endif
68971+
68972+ }
68973+#endif
68974+
68975 if (ret)
68976 ret = -EFAULT;
68977
68978@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
68979 if (err)
68980 goto exit;
68981
68982- if (name->name[0] != '\0')
68983+ if (name->name[0] != '\0') {
68984 fsnotify_open(file);
68985+ trace_open_exec(name->name);
68986+ }
68987
68988 out:
68989 return file;
68990@@ -815,7 +893,7 @@ int kernel_read(struct file *file, loff_t offset,
68991 old_fs = get_fs();
68992 set_fs(get_ds());
68993 /* The cast to a user pointer is valid due to the set_fs() */
68994- result = vfs_read(file, (void __user *)addr, count, &pos);
68995+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
68996 set_fs(old_fs);
68997 return result;
68998 }
68999@@ -860,6 +938,7 @@ static int exec_mmap(struct mm_struct *mm)
69000 tsk->mm = mm;
69001 tsk->active_mm = mm;
69002 activate_mm(active_mm, mm);
69003+ populate_stack();
69004 tsk->mm->vmacache_seqnum = 0;
69005 vmacache_flush(tsk);
69006 task_unlock(tsk);
69007@@ -926,10 +1005,14 @@ static int de_thread(struct task_struct *tsk)
69008 if (!thread_group_leader(tsk)) {
69009 struct task_struct *leader = tsk->group_leader;
69010
69011- sig->notify_count = -1; /* for exit_notify() */
69012 for (;;) {
69013 threadgroup_change_begin(tsk);
69014 write_lock_irq(&tasklist_lock);
69015+ /*
69016+ * Do this under tasklist_lock to ensure that
69017+ * exit_notify() can't miss ->group_exit_task
69018+ */
69019+ sig->notify_count = -1;
69020 if (likely(leader->exit_state))
69021 break;
69022 __set_current_state(TASK_KILLABLE);
69023@@ -1258,7 +1341,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
69024 }
69025 rcu_read_unlock();
69026
69027- if (p->fs->users > n_fs)
69028+ if (atomic_read(&p->fs->users) > n_fs)
69029 bprm->unsafe |= LSM_UNSAFE_SHARE;
69030 else
69031 p->fs->in_exec = 1;
69032@@ -1459,6 +1542,31 @@ static int exec_binprm(struct linux_binprm *bprm)
69033 return ret;
69034 }
69035
69036+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69037+static DEFINE_PER_CPU(u64, exec_counter);
69038+static int __init init_exec_counters(void)
69039+{
69040+ unsigned int cpu;
69041+
69042+ for_each_possible_cpu(cpu) {
69043+ per_cpu(exec_counter, cpu) = (u64)cpu;
69044+ }
69045+
69046+ return 0;
69047+}
69048+early_initcall(init_exec_counters);
69049+static inline void increment_exec_counter(void)
69050+{
69051+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
69052+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
69053+}
69054+#else
69055+static inline void increment_exec_counter(void) {}
69056+#endif
69057+
69058+extern void gr_handle_exec_args(struct linux_binprm *bprm,
69059+ struct user_arg_ptr argv);
69060+
69061 /*
69062 * sys_execve() executes a new program.
69063 */
69064@@ -1467,6 +1575,11 @@ static int do_execveat_common(int fd, struct filename *filename,
69065 struct user_arg_ptr envp,
69066 int flags)
69067 {
69068+#ifdef CONFIG_GRKERNSEC
69069+ struct file *old_exec_file;
69070+ struct acl_subject_label *old_acl;
69071+ struct rlimit old_rlim[RLIM_NLIMITS];
69072+#endif
69073 char *pathbuf = NULL;
69074 struct linux_binprm *bprm;
69075 struct file *file;
69076@@ -1476,6 +1589,8 @@ static int do_execveat_common(int fd, struct filename *filename,
69077 if (IS_ERR(filename))
69078 return PTR_ERR(filename);
69079
69080+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
69081+
69082 /*
69083 * We move the actual failure in case of RLIMIT_NPROC excess from
69084 * set*uid() to execve() because too many poorly written programs
69085@@ -1513,6 +1628,11 @@ static int do_execveat_common(int fd, struct filename *filename,
69086 if (IS_ERR(file))
69087 goto out_unmark;
69088
69089+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
69090+ retval = -EPERM;
69091+ goto out_unmark;
69092+ }
69093+
69094 sched_exec();
69095
69096 bprm->file = file;
69097@@ -1539,6 +1659,11 @@ static int do_execveat_common(int fd, struct filename *filename,
69098 }
69099 bprm->interp = bprm->filename;
69100
69101+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
69102+ retval = -EACCES;
69103+ goto out_unmark;
69104+ }
69105+
69106 retval = bprm_mm_init(bprm);
69107 if (retval)
69108 goto out_unmark;
69109@@ -1555,24 +1680,70 @@ static int do_execveat_common(int fd, struct filename *filename,
69110 if (retval < 0)
69111 goto out;
69112
69113+#ifdef CONFIG_GRKERNSEC
69114+ old_acl = current->acl;
69115+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
69116+ old_exec_file = current->exec_file;
69117+ get_file(file);
69118+ current->exec_file = file;
69119+#endif
69120+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69121+ /* limit suid stack to 8MB
69122+ * we saved the old limits above and will restore them if this exec fails
69123+ */
69124+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
69125+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
69126+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
69127+#endif
69128+
69129+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
69130+ retval = -EPERM;
69131+ goto out_fail;
69132+ }
69133+
69134+ if (!gr_tpe_allow(file)) {
69135+ retval = -EACCES;
69136+ goto out_fail;
69137+ }
69138+
69139+ if (gr_check_crash_exec(file)) {
69140+ retval = -EACCES;
69141+ goto out_fail;
69142+ }
69143+
69144+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
69145+ bprm->unsafe);
69146+ if (retval < 0)
69147+ goto out_fail;
69148+
69149 retval = copy_strings_kernel(1, &bprm->filename, bprm);
69150 if (retval < 0)
69151- goto out;
69152+ goto out_fail;
69153
69154 bprm->exec = bprm->p;
69155 retval = copy_strings(bprm->envc, envp, bprm);
69156 if (retval < 0)
69157- goto out;
69158+ goto out_fail;
69159
69160 retval = copy_strings(bprm->argc, argv, bprm);
69161 if (retval < 0)
69162- goto out;
69163+ goto out_fail;
69164+
69165+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
69166+
69167+ gr_handle_exec_args(bprm, argv);
69168
69169 retval = exec_binprm(bprm);
69170 if (retval < 0)
69171- goto out;
69172+ goto out_fail;
69173+#ifdef CONFIG_GRKERNSEC
69174+ if (old_exec_file)
69175+ fput(old_exec_file);
69176+#endif
69177
69178 /* execve succeeded */
69179+
69180+ increment_exec_counter();
69181 current->fs->in_exec = 0;
69182 current->in_execve = 0;
69183 acct_update_integrals(current);
69184@@ -1584,6 +1755,14 @@ static int do_execveat_common(int fd, struct filename *filename,
69185 put_files_struct(displaced);
69186 return retval;
69187
69188+out_fail:
69189+#ifdef CONFIG_GRKERNSEC
69190+ current->acl = old_acl;
69191+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
69192+ fput(current->exec_file);
69193+ current->exec_file = old_exec_file;
69194+#endif
69195+
69196 out:
69197 if (bprm->mm) {
69198 acct_arg_size(bprm, 0);
69199@@ -1730,3 +1909,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
69200 argv, envp, flags);
69201 }
69202 #endif
69203+
69204+int pax_check_flags(unsigned long *flags)
69205+{
69206+ int retval = 0;
69207+
69208+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
69209+ if (*flags & MF_PAX_SEGMEXEC)
69210+ {
69211+ *flags &= ~MF_PAX_SEGMEXEC;
69212+ retval = -EINVAL;
69213+ }
69214+#endif
69215+
69216+ if ((*flags & MF_PAX_PAGEEXEC)
69217+
69218+#ifdef CONFIG_PAX_PAGEEXEC
69219+ && (*flags & MF_PAX_SEGMEXEC)
69220+#endif
69221+
69222+ )
69223+ {
69224+ *flags &= ~MF_PAX_PAGEEXEC;
69225+ retval = -EINVAL;
69226+ }
69227+
69228+ if ((*flags & MF_PAX_MPROTECT)
69229+
69230+#ifdef CONFIG_PAX_MPROTECT
69231+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
69232+#endif
69233+
69234+ )
69235+ {
69236+ *flags &= ~MF_PAX_MPROTECT;
69237+ retval = -EINVAL;
69238+ }
69239+
69240+ if ((*flags & MF_PAX_EMUTRAMP)
69241+
69242+#ifdef CONFIG_PAX_EMUTRAMP
69243+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
69244+#endif
69245+
69246+ )
69247+ {
69248+ *flags &= ~MF_PAX_EMUTRAMP;
69249+ retval = -EINVAL;
69250+ }
69251+
69252+ return retval;
69253+}
69254+
69255+EXPORT_SYMBOL(pax_check_flags);
69256+
69257+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69258+char *pax_get_path(const struct path *path, char *buf, int buflen)
69259+{
69260+ char *pathname = d_path(path, buf, buflen);
69261+
69262+ if (IS_ERR(pathname))
69263+ goto toolong;
69264+
69265+ pathname = mangle_path(buf, pathname, "\t\n\\");
69266+ if (!pathname)
69267+ goto toolong;
69268+
69269+ *pathname = 0;
69270+ return buf;
69271+
69272+toolong:
69273+ return "<path too long>";
69274+}
69275+EXPORT_SYMBOL(pax_get_path);
69276+
69277+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
69278+{
69279+ struct task_struct *tsk = current;
69280+ struct mm_struct *mm = current->mm;
69281+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
69282+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
69283+ char *path_exec = NULL;
69284+ char *path_fault = NULL;
69285+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
69286+ siginfo_t info = { };
69287+
69288+ if (buffer_exec && buffer_fault) {
69289+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
69290+
69291+ down_read(&mm->mmap_sem);
69292+ vma = mm->mmap;
69293+ while (vma && (!vma_exec || !vma_fault)) {
69294+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
69295+ vma_exec = vma;
69296+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
69297+ vma_fault = vma;
69298+ vma = vma->vm_next;
69299+ }
69300+ if (vma_exec)
69301+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
69302+ if (vma_fault) {
69303+ start = vma_fault->vm_start;
69304+ end = vma_fault->vm_end;
69305+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
69306+ if (vma_fault->vm_file)
69307+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
69308+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
69309+ path_fault = "<heap>";
69310+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
69311+ path_fault = "<stack>";
69312+ else
69313+ path_fault = "<anonymous mapping>";
69314+ }
69315+ up_read(&mm->mmap_sem);
69316+ }
69317+ if (tsk->signal->curr_ip)
69318+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
69319+ else
69320+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
69321+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
69322+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
69323+ free_page((unsigned long)buffer_exec);
69324+ free_page((unsigned long)buffer_fault);
69325+ pax_report_insns(regs, pc, sp);
69326+ info.si_signo = SIGKILL;
69327+ info.si_errno = 0;
69328+ info.si_code = SI_KERNEL;
69329+ info.si_pid = 0;
69330+ info.si_uid = 0;
69331+ do_coredump(&info);
69332+}
69333+#endif
69334+
69335+#ifdef CONFIG_PAX_REFCOUNT
69336+void pax_report_refcount_overflow(struct pt_regs *regs)
69337+{
69338+ if (current->signal->curr_ip)
69339+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
69340+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
69341+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
69342+ else
69343+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
69344+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
69345+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
69346+ preempt_disable();
69347+ show_regs(regs);
69348+ preempt_enable();
69349+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
69350+}
69351+#endif
69352+
69353+#ifdef CONFIG_PAX_USERCOPY
69354+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
69355+static noinline int check_stack_object(const void *obj, unsigned long len)
69356+{
69357+ const void * const stack = task_stack_page(current);
69358+ const void * const stackend = stack + THREAD_SIZE;
69359+
69360+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
69361+ const void *frame = NULL;
69362+ const void *oldframe;
69363+#endif
69364+
69365+ if (obj + len < obj)
69366+ return -1;
69367+
69368+ if (obj + len <= stack || stackend <= obj)
69369+ return 0;
69370+
69371+ if (obj < stack || stackend < obj + len)
69372+ return -1;
69373+
69374+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
69375+ oldframe = __builtin_frame_address(1);
69376+ if (oldframe)
69377+ frame = __builtin_frame_address(2);
69378+ /*
69379+ low ----------------------------------------------> high
69380+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
69381+ ^----------------^
69382+ allow copies only within here
69383+ */
69384+ while (stack <= frame && frame < stackend) {
69385+ /* if obj + len extends past the last frame, this
69386+ check won't pass and the next frame will be 0,
69387+ causing us to bail out and correctly report
69388+ the copy as invalid
69389+ */
69390+ if (obj + len <= frame)
69391+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
69392+ oldframe = frame;
69393+ frame = *(const void * const *)frame;
69394+ }
69395+ return -1;
69396+#else
69397+ return 1;
69398+#endif
69399+}
69400+
69401+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
69402+{
69403+ if (current->signal->curr_ip)
69404+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
69405+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
69406+ else
69407+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
69408+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
69409+ dump_stack();
69410+ gr_handle_kernel_exploit();
69411+ do_group_exit(SIGKILL);
69412+}
69413+#endif
69414+
69415+#ifdef CONFIG_PAX_USERCOPY
69416+
69417+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
69418+{
69419+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69420+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
69421+#ifdef CONFIG_MODULES
69422+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
69423+#else
69424+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
69425+#endif
69426+
69427+#else
69428+ unsigned long textlow = (unsigned long)_stext;
69429+ unsigned long texthigh = (unsigned long)_etext;
69430+
69431+#ifdef CONFIG_X86_64
69432+ /* check against linear mapping as well */
69433+ if (high > (unsigned long)__va(__pa(textlow)) &&
69434+ low < (unsigned long)__va(__pa(texthigh)))
69435+ return true;
69436+#endif
69437+
69438+#endif
69439+
69440+ if (high <= textlow || low >= texthigh)
69441+ return false;
69442+ else
69443+ return true;
69444+}
69445+#endif
69446+
69447+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
69448+{
69449+#ifdef CONFIG_PAX_USERCOPY
69450+ const char *type;
69451+#endif
69452+
69453+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
69454+ unsigned long stackstart = (unsigned long)task_stack_page(current);
69455+ unsigned long currentsp = (unsigned long)&stackstart;
69456+ if (unlikely((currentsp < stackstart + 512 ||
69457+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
69458+ BUG();
69459+#endif
69460+
69461+#ifndef CONFIG_PAX_USERCOPY_DEBUG
69462+ if (const_size)
69463+ return;
69464+#endif
69465+
69466+#ifdef CONFIG_PAX_USERCOPY
69467+ if (!n)
69468+ return;
69469+
69470+ type = check_heap_object(ptr, n);
69471+ if (!type) {
69472+ int ret = check_stack_object(ptr, n);
69473+ if (ret == 1 || ret == 2)
69474+ return;
69475+ if (ret == 0) {
69476+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
69477+ type = "<kernel text>";
69478+ else
69479+ return;
69480+ } else
69481+ type = "<process stack>";
69482+ }
69483+
69484+ pax_report_usercopy(ptr, n, to_user, type);
69485+#endif
69486+
69487+}
69488+EXPORT_SYMBOL(__check_object_size);
69489+
69490+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69491+void pax_track_stack(void)
69492+{
69493+ unsigned long sp = (unsigned long)&sp;
69494+ if (sp < current_thread_info()->lowest_stack &&
69495+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
69496+ current_thread_info()->lowest_stack = sp;
69497+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
69498+ BUG();
69499+}
69500+EXPORT_SYMBOL(pax_track_stack);
69501+#endif
69502+
69503+#ifdef CONFIG_PAX_SIZE_OVERFLOW
69504+void __nocapture(1, 3, 4) report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
69505+{
69506+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
69507+ dump_stack();
69508+ do_group_exit(SIGKILL);
69509+}
69510+EXPORT_SYMBOL(report_size_overflow);
69511+#endif
69512diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
69513index 9f9992b..8b59411 100644
69514--- a/fs/ext2/balloc.c
69515+++ b/fs/ext2/balloc.c
69516@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
69517
69518 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
69519 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
69520- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
69521+ if (free_blocks < root_blocks + 1 &&
69522 !uid_eq(sbi->s_resuid, current_fsuid()) &&
69523 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
69524- !in_group_p (sbi->s_resgid))) {
69525+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
69526 return 0;
69527 }
69528 return 1;
69529diff --git a/fs/ext2/super.c b/fs/ext2/super.c
69530index d0e746e..82e06f0 100644
69531--- a/fs/ext2/super.c
69532+++ b/fs/ext2/super.c
69533@@ -267,10 +267,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
69534 #ifdef CONFIG_EXT2_FS_XATTR
69535 if (test_opt(sb, XATTR_USER))
69536 seq_puts(seq, ",user_xattr");
69537- if (!test_opt(sb, XATTR_USER) &&
69538- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
69539+ if (!test_opt(sb, XATTR_USER))
69540 seq_puts(seq, ",nouser_xattr");
69541- }
69542 #endif
69543
69544 #ifdef CONFIG_EXT2_FS_POSIX_ACL
69545@@ -856,8 +854,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
69546 if (def_mount_opts & EXT2_DEFM_UID16)
69547 set_opt(sbi->s_mount_opt, NO_UID32);
69548 #ifdef CONFIG_EXT2_FS_XATTR
69549- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
69550- set_opt(sbi->s_mount_opt, XATTR_USER);
69551+ /* always enable user xattrs */
69552+ set_opt(sbi->s_mount_opt, XATTR_USER);
69553 #endif
69554 #ifdef CONFIG_EXT2_FS_POSIX_ACL
69555 if (def_mount_opts & EXT2_DEFM_ACL)
69556diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
69557index 9142614..97484fa 100644
69558--- a/fs/ext2/xattr.c
69559+++ b/fs/ext2/xattr.c
69560@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
69561 struct buffer_head *bh = NULL;
69562 struct ext2_xattr_entry *entry;
69563 char *end;
69564- size_t rest = buffer_size;
69565+ size_t rest = buffer_size, total_size = 0;
69566 int error;
69567
69568 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
69569@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
69570 buffer += size;
69571 }
69572 rest -= size;
69573+ total_size += size;
69574 }
69575 }
69576- error = buffer_size - rest; /* total size */
69577+ error = total_size;
69578
69579 cleanup:
69580 brelse(bh);
69581diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
69582index 158b5d4..2432610 100644
69583--- a/fs/ext3/balloc.c
69584+++ b/fs/ext3/balloc.c
69585@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
69586
69587 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
69588 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
69589- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
69590+ if (free_blocks < root_blocks + 1 &&
69591 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
69592 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
69593- !in_group_p (sbi->s_resgid))) {
69594+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
69595 return 0;
69596 }
69597 return 1;
69598diff --git a/fs/ext3/super.c b/fs/ext3/super.c
69599index d4dbf3c..906a6fb 100644
69600--- a/fs/ext3/super.c
69601+++ b/fs/ext3/super.c
69602@@ -655,10 +655,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
69603 #ifdef CONFIG_EXT3_FS_XATTR
69604 if (test_opt(sb, XATTR_USER))
69605 seq_puts(seq, ",user_xattr");
69606- if (!test_opt(sb, XATTR_USER) &&
69607- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
69608+ if (!test_opt(sb, XATTR_USER))
69609 seq_puts(seq, ",nouser_xattr");
69610- }
69611 #endif
69612 #ifdef CONFIG_EXT3_FS_POSIX_ACL
69613 if (test_opt(sb, POSIX_ACL))
69614@@ -1760,8 +1758,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
69615 if (def_mount_opts & EXT3_DEFM_UID16)
69616 set_opt(sbi->s_mount_opt, NO_UID32);
69617 #ifdef CONFIG_EXT3_FS_XATTR
69618- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
69619- set_opt(sbi->s_mount_opt, XATTR_USER);
69620+ /* always enable user xattrs */
69621+ set_opt(sbi->s_mount_opt, XATTR_USER);
69622 #endif
69623 #ifdef CONFIG_EXT3_FS_POSIX_ACL
69624 if (def_mount_opts & EXT3_DEFM_ACL)
69625diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
69626index c6874be..f8a6ae8 100644
69627--- a/fs/ext3/xattr.c
69628+++ b/fs/ext3/xattr.c
69629@@ -330,7 +330,7 @@ static int
69630 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
69631 char *buffer, size_t buffer_size)
69632 {
69633- size_t rest = buffer_size;
69634+ size_t rest = buffer_size, total_size = 0;
69635
69636 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
69637 const struct xattr_handler *handler =
69638@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
69639 buffer += size;
69640 }
69641 rest -= size;
69642+ total_size += size;
69643 }
69644 }
69645- return buffer_size - rest;
69646+ return total_size;
69647 }
69648
69649 static int
69650diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
69651index 83a6f49..d4e4d03 100644
69652--- a/fs/ext4/balloc.c
69653+++ b/fs/ext4/balloc.c
69654@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
69655 /* Hm, nope. Are (enough) root reserved clusters available? */
69656 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
69657 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
69658- capable(CAP_SYS_RESOURCE) ||
69659- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
69660+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
69661+ capable_nolog(CAP_SYS_RESOURCE)) {
69662
69663 if (free_clusters >= (nclusters + dirty_clusters +
69664 resv_clusters))
69665diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
69666index f63c3d5..3c1a033 100644
69667--- a/fs/ext4/ext4.h
69668+++ b/fs/ext4/ext4.h
69669@@ -1287,19 +1287,19 @@ struct ext4_sb_info {
69670 unsigned long s_mb_last_start;
69671
69672 /* stats for buddy allocator */
69673- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
69674- atomic_t s_bal_success; /* we found long enough chunks */
69675- atomic_t s_bal_allocated; /* in blocks */
69676- atomic_t s_bal_ex_scanned; /* total extents scanned */
69677- atomic_t s_bal_goals; /* goal hits */
69678- atomic_t s_bal_breaks; /* too long searches */
69679- atomic_t s_bal_2orders; /* 2^order hits */
69680+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
69681+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
69682+ atomic_unchecked_t s_bal_allocated; /* in blocks */
69683+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
69684+ atomic_unchecked_t s_bal_goals; /* goal hits */
69685+ atomic_unchecked_t s_bal_breaks; /* too long searches */
69686+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
69687 spinlock_t s_bal_lock;
69688 unsigned long s_mb_buddies_generated;
69689 unsigned long long s_mb_generation_time;
69690- atomic_t s_mb_lost_chunks;
69691- atomic_t s_mb_preallocated;
69692- atomic_t s_mb_discarded;
69693+ atomic_unchecked_t s_mb_lost_chunks;
69694+ atomic_unchecked_t s_mb_preallocated;
69695+ atomic_unchecked_t s_mb_discarded;
69696 atomic_t s_lock_busy;
69697
69698 /* locality groups */
69699diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
69700index 8d1e602..abf497b 100644
69701--- a/fs/ext4/mballoc.c
69702+++ b/fs/ext4/mballoc.c
69703@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
69704 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
69705
69706 if (EXT4_SB(sb)->s_mb_stats)
69707- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
69708+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
69709
69710 break;
69711 }
69712@@ -2211,7 +2211,7 @@ repeat:
69713 ac->ac_status = AC_STATUS_CONTINUE;
69714 ac->ac_flags |= EXT4_MB_HINT_FIRST;
69715 cr = 3;
69716- atomic_inc(&sbi->s_mb_lost_chunks);
69717+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
69718 goto repeat;
69719 }
69720 }
69721@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
69722 if (sbi->s_mb_stats) {
69723 ext4_msg(sb, KERN_INFO,
69724 "mballoc: %u blocks %u reqs (%u success)",
69725- atomic_read(&sbi->s_bal_allocated),
69726- atomic_read(&sbi->s_bal_reqs),
69727- atomic_read(&sbi->s_bal_success));
69728+ atomic_read_unchecked(&sbi->s_bal_allocated),
69729+ atomic_read_unchecked(&sbi->s_bal_reqs),
69730+ atomic_read_unchecked(&sbi->s_bal_success));
69731 ext4_msg(sb, KERN_INFO,
69732 "mballoc: %u extents scanned, %u goal hits, "
69733 "%u 2^N hits, %u breaks, %u lost",
69734- atomic_read(&sbi->s_bal_ex_scanned),
69735- atomic_read(&sbi->s_bal_goals),
69736- atomic_read(&sbi->s_bal_2orders),
69737- atomic_read(&sbi->s_bal_breaks),
69738- atomic_read(&sbi->s_mb_lost_chunks));
69739+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
69740+ atomic_read_unchecked(&sbi->s_bal_goals),
69741+ atomic_read_unchecked(&sbi->s_bal_2orders),
69742+ atomic_read_unchecked(&sbi->s_bal_breaks),
69743+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
69744 ext4_msg(sb, KERN_INFO,
69745 "mballoc: %lu generated and it took %Lu",
69746 sbi->s_mb_buddies_generated,
69747 sbi->s_mb_generation_time);
69748 ext4_msg(sb, KERN_INFO,
69749 "mballoc: %u preallocated, %u discarded",
69750- atomic_read(&sbi->s_mb_preallocated),
69751- atomic_read(&sbi->s_mb_discarded));
69752+ atomic_read_unchecked(&sbi->s_mb_preallocated),
69753+ atomic_read_unchecked(&sbi->s_mb_discarded));
69754 }
69755
69756 free_percpu(sbi->s_locality_groups);
69757@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
69758 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
69759
69760 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
69761- atomic_inc(&sbi->s_bal_reqs);
69762- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
69763+ atomic_inc_unchecked(&sbi->s_bal_reqs);
69764+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
69765 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
69766- atomic_inc(&sbi->s_bal_success);
69767- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
69768+ atomic_inc_unchecked(&sbi->s_bal_success);
69769+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
69770 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
69771 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
69772- atomic_inc(&sbi->s_bal_goals);
69773+ atomic_inc_unchecked(&sbi->s_bal_goals);
69774 if (ac->ac_found > sbi->s_mb_max_to_scan)
69775- atomic_inc(&sbi->s_bal_breaks);
69776+ atomic_inc_unchecked(&sbi->s_bal_breaks);
69777 }
69778
69779 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
69780@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
69781 trace_ext4_mb_new_inode_pa(ac, pa);
69782
69783 ext4_mb_use_inode_pa(ac, pa);
69784- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
69785+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
69786
69787 ei = EXT4_I(ac->ac_inode);
69788 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
69789@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
69790 trace_ext4_mb_new_group_pa(ac, pa);
69791
69792 ext4_mb_use_group_pa(ac, pa);
69793- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
69794+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
69795
69796 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
69797 lg = ac->ac_lg;
69798@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
69799 * from the bitmap and continue.
69800 */
69801 }
69802- atomic_add(free, &sbi->s_mb_discarded);
69803+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
69804
69805 return err;
69806 }
69807@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
69808 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
69809 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
69810 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
69811- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
69812+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
69813 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
69814
69815 return 0;
69816diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
69817index 8313ca3..8a37d08 100644
69818--- a/fs/ext4/mmp.c
69819+++ b/fs/ext4/mmp.c
69820@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
69821 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
69822 const char *function, unsigned int line, const char *msg)
69823 {
69824- __ext4_warning(sb, function, line, msg);
69825+ __ext4_warning(sb, function, line, "%s", msg);
69826 __ext4_warning(sb, function, line,
69827 "MMP failure info: last update time: %llu, last update "
69828 "node: %s, last update device: %s\n",
69829diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
69830index 8a8ec62..1b02de5 100644
69831--- a/fs/ext4/resize.c
69832+++ b/fs/ext4/resize.c
69833@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
69834
69835 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
69836 for (count2 = count; count > 0; count -= count2, block += count2) {
69837- ext4_fsblk_t start;
69838+ ext4_fsblk_t start, diff;
69839 struct buffer_head *bh;
69840 ext4_group_t group;
69841 int err;
69842@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
69843 start = ext4_group_first_block_no(sb, group);
69844 group -= flex_gd->groups[0].group;
69845
69846- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
69847- if (count2 > count)
69848- count2 = count;
69849-
69850 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
69851 BUG_ON(flex_gd->count > 1);
69852 continue;
69853@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
69854 err = ext4_journal_get_write_access(handle, bh);
69855 if (err)
69856 return err;
69857+
69858+ diff = block - start;
69859+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
69860+ if (count2 > count)
69861+ count2 = count;
69862+
69863 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
69864- block - start, count2);
69865- ext4_set_bits(bh->b_data, block - start, count2);
69866+ diff, count2);
69867+ ext4_set_bits(bh->b_data, diff, count2);
69868
69869 err = ext4_handle_dirty_metadata(handle, NULL, bh);
69870 if (unlikely(err))
69871diff --git a/fs/ext4/super.c b/fs/ext4/super.c
69872index e061e66..87bc092 100644
69873--- a/fs/ext4/super.c
69874+++ b/fs/ext4/super.c
69875@@ -1243,7 +1243,7 @@ static ext4_fsblk_t get_sb_block(void **data)
69876 }
69877
69878 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
69879-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
69880+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
69881 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
69882
69883 #ifdef CONFIG_QUOTA
69884@@ -2443,7 +2443,7 @@ struct ext4_attr {
69885 int offset;
69886 int deprecated_val;
69887 } u;
69888-};
69889+} __do_const;
69890
69891 static int parse_strtoull(const char *buf,
69892 unsigned long long max, unsigned long long *value)
69893diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
69894index 1e09fc7..0400dd4 100644
69895--- a/fs/ext4/xattr.c
69896+++ b/fs/ext4/xattr.c
69897@@ -399,7 +399,7 @@ static int
69898 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
69899 char *buffer, size_t buffer_size)
69900 {
69901- size_t rest = buffer_size;
69902+ size_t rest = buffer_size, total_size = 0;
69903
69904 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
69905 const struct xattr_handler *handler =
69906@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
69907 buffer += size;
69908 }
69909 rest -= size;
69910+ total_size += size;
69911 }
69912 }
69913- return buffer_size - rest;
69914+ return total_size;
69915 }
69916
69917 static int
69918diff --git a/fs/fcntl.c b/fs/fcntl.c
69919index ee85cd4..9dd0d20 100644
69920--- a/fs/fcntl.c
69921+++ b/fs/fcntl.c
69922@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
69923 int force)
69924 {
69925 security_file_set_fowner(filp);
69926+ if (gr_handle_chroot_fowner(pid, type))
69927+ return;
69928+ if (gr_check_protected_task_fowner(pid, type))
69929+ return;
69930 f_modown(filp, pid, type, force);
69931 }
69932 EXPORT_SYMBOL(__f_setown);
69933diff --git a/fs/fhandle.c b/fs/fhandle.c
69934index 999ff5c..2281df9 100644
69935--- a/fs/fhandle.c
69936+++ b/fs/fhandle.c
69937@@ -8,6 +8,7 @@
69938 #include <linux/fs_struct.h>
69939 #include <linux/fsnotify.h>
69940 #include <linux/personality.h>
69941+#include <linux/grsecurity.h>
69942 #include <asm/uaccess.h>
69943 #include "internal.h"
69944 #include "mount.h"
69945@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
69946 } else
69947 retval = 0;
69948 /* copy the mount id */
69949- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
69950- sizeof(*mnt_id)) ||
69951+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
69952 copy_to_user(ufh, handle,
69953 sizeof(struct file_handle) + handle_bytes))
69954 retval = -EFAULT;
69955@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
69956 * the directory. Ideally we would like CAP_DAC_SEARCH.
69957 * But we don't have that
69958 */
69959- if (!capable(CAP_DAC_READ_SEARCH)) {
69960+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
69961 retval = -EPERM;
69962 goto out_err;
69963 }
69964@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
69965 goto out_err;
69966 }
69967 /* copy the full handle */
69968- if (copy_from_user(handle, ufh,
69969- sizeof(struct file_handle) +
69970+ *handle = f_handle;
69971+ if (copy_from_user(&handle->f_handle,
69972+ &ufh->f_handle,
69973 f_handle.handle_bytes)) {
69974 retval = -EFAULT;
69975 goto out_handle;
69976diff --git a/fs/file.c b/fs/file.c
69977index ee738ea..f6c15629 100644
69978--- a/fs/file.c
69979+++ b/fs/file.c
69980@@ -16,6 +16,7 @@
69981 #include <linux/slab.h>
69982 #include <linux/vmalloc.h>
69983 #include <linux/file.h>
69984+#include <linux/security.h>
69985 #include <linux/fdtable.h>
69986 #include <linux/bitops.h>
69987 #include <linux/interrupt.h>
69988@@ -139,7 +140,7 @@ out:
69989 * Return <0 error code on error; 1 on successful completion.
69990 * The files->file_lock should be held on entry, and will be held on exit.
69991 */
69992-static int expand_fdtable(struct files_struct *files, int nr)
69993+static int expand_fdtable(struct files_struct *files, unsigned int nr)
69994 __releases(files->file_lock)
69995 __acquires(files->file_lock)
69996 {
69997@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
69998 * expanded and execution may have blocked.
69999 * The files->file_lock should be held on entry, and will be held on exit.
70000 */
70001-static int expand_files(struct files_struct *files, int nr)
70002+static int expand_files(struct files_struct *files, unsigned int nr)
70003 {
70004 struct fdtable *fdt;
70005
70006@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
70007 if (!file)
70008 return __close_fd(files, fd);
70009
70010+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
70011 if (fd >= rlimit(RLIMIT_NOFILE))
70012 return -EBADF;
70013
70014@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
70015 if (unlikely(oldfd == newfd))
70016 return -EINVAL;
70017
70018+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
70019 if (newfd >= rlimit(RLIMIT_NOFILE))
70020 return -EBADF;
70021
70022@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
70023 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
70024 {
70025 int err;
70026+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
70027 if (from >= rlimit(RLIMIT_NOFILE))
70028 return -EINVAL;
70029 err = alloc_fd(from, flags);
70030diff --git a/fs/filesystems.c b/fs/filesystems.c
70031index 5797d45..7d7d79a 100644
70032--- a/fs/filesystems.c
70033+++ b/fs/filesystems.c
70034@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
70035 int len = dot ? dot - name : strlen(name);
70036
70037 fs = __get_fs_type(name, len);
70038+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70039+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
70040+#else
70041 if (!fs && (request_module("fs-%.*s", len, name) == 0))
70042+#endif
70043 fs = __get_fs_type(name, len);
70044
70045 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
70046diff --git a/fs/fs_struct.c b/fs/fs_struct.c
70047index 7dca743..1ff87ae 100644
70048--- a/fs/fs_struct.c
70049+++ b/fs/fs_struct.c
70050@@ -4,6 +4,7 @@
70051 #include <linux/path.h>
70052 #include <linux/slab.h>
70053 #include <linux/fs_struct.h>
70054+#include <linux/grsecurity.h>
70055 #include "internal.h"
70056
70057 /*
70058@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
70059 struct path old_root;
70060
70061 path_get(path);
70062+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
70063 spin_lock(&fs->lock);
70064 write_seqcount_begin(&fs->seq);
70065 old_root = fs->root;
70066 fs->root = *path;
70067+ gr_set_chroot_entries(current, path);
70068 write_seqcount_end(&fs->seq);
70069 spin_unlock(&fs->lock);
70070- if (old_root.dentry)
70071+ if (old_root.dentry) {
70072+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
70073 path_put(&old_root);
70074+ }
70075 }
70076
70077 /*
70078@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
70079 int hits = 0;
70080 spin_lock(&fs->lock);
70081 write_seqcount_begin(&fs->seq);
70082+ /* this root replacement is only done by pivot_root,
70083+ leave grsec's chroot tagging alone for this task
70084+ so that a pivoted root isn't treated as a chroot
70085+ */
70086 hits += replace_path(&fs->root, old_root, new_root);
70087 hits += replace_path(&fs->pwd, old_root, new_root);
70088 write_seqcount_end(&fs->seq);
70089@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
70090
70091 void free_fs_struct(struct fs_struct *fs)
70092 {
70093+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
70094 path_put(&fs->root);
70095 path_put(&fs->pwd);
70096 kmem_cache_free(fs_cachep, fs);
70097@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
70098 task_lock(tsk);
70099 spin_lock(&fs->lock);
70100 tsk->fs = NULL;
70101- kill = !--fs->users;
70102+ gr_clear_chroot_entries(tsk);
70103+ kill = !atomic_dec_return(&fs->users);
70104 spin_unlock(&fs->lock);
70105 task_unlock(tsk);
70106 if (kill)
70107@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
70108 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
70109 /* We don't need to lock fs - think why ;-) */
70110 if (fs) {
70111- fs->users = 1;
70112+ atomic_set(&fs->users, 1);
70113 fs->in_exec = 0;
70114 spin_lock_init(&fs->lock);
70115 seqcount_init(&fs->seq);
70116@@ -121,9 +132,13 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
70117 spin_lock(&old->lock);
70118 fs->root = old->root;
70119 path_get(&fs->root);
70120+ /* instead of calling gr_set_chroot_entries here,
70121+ we call it from every caller of this function
70122+ */
70123 fs->pwd = old->pwd;
70124 path_get(&fs->pwd);
70125 spin_unlock(&old->lock);
70126+ gr_inc_chroot_refcnts(fs->root.dentry, fs->root.mnt);
70127 }
70128 return fs;
70129 }
70130@@ -139,8 +154,9 @@ int unshare_fs_struct(void)
70131
70132 task_lock(current);
70133 spin_lock(&fs->lock);
70134- kill = !--fs->users;
70135+ kill = !atomic_dec_return(&fs->users);
70136 current->fs = new_fs;
70137+ gr_set_chroot_entries(current, &new_fs->root);
70138 spin_unlock(&fs->lock);
70139 task_unlock(current);
70140
70141@@ -153,13 +169,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
70142
70143 int current_umask(void)
70144 {
70145- return current->fs->umask;
70146+ return current->fs->umask | gr_acl_umask();
70147 }
70148 EXPORT_SYMBOL(current_umask);
70149
70150 /* to be mentioned only in INIT_TASK */
70151 struct fs_struct init_fs = {
70152- .users = 1,
70153+ .users = ATOMIC_INIT(1),
70154 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
70155 .seq = SEQCNT_ZERO(init_fs.seq),
70156 .umask = 0022,
70157diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
70158index 89acec7..a575262 100644
70159--- a/fs/fscache/cookie.c
70160+++ b/fs/fscache/cookie.c
70161@@ -19,7 +19,7 @@
70162
70163 struct kmem_cache *fscache_cookie_jar;
70164
70165-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
70166+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
70167
70168 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
70169 static int fscache_alloc_object(struct fscache_cache *cache,
70170@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
70171 parent ? (char *) parent->def->name : "<no-parent>",
70172 def->name, netfs_data, enable);
70173
70174- fscache_stat(&fscache_n_acquires);
70175+ fscache_stat_unchecked(&fscache_n_acquires);
70176
70177 /* if there's no parent cookie, then we don't create one here either */
70178 if (!parent) {
70179- fscache_stat(&fscache_n_acquires_null);
70180+ fscache_stat_unchecked(&fscache_n_acquires_null);
70181 _leave(" [no parent]");
70182 return NULL;
70183 }
70184@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
70185 /* allocate and initialise a cookie */
70186 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
70187 if (!cookie) {
70188- fscache_stat(&fscache_n_acquires_oom);
70189+ fscache_stat_unchecked(&fscache_n_acquires_oom);
70190 _leave(" [ENOMEM]");
70191 return NULL;
70192 }
70193@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
70194
70195 switch (cookie->def->type) {
70196 case FSCACHE_COOKIE_TYPE_INDEX:
70197- fscache_stat(&fscache_n_cookie_index);
70198+ fscache_stat_unchecked(&fscache_n_cookie_index);
70199 break;
70200 case FSCACHE_COOKIE_TYPE_DATAFILE:
70201- fscache_stat(&fscache_n_cookie_data);
70202+ fscache_stat_unchecked(&fscache_n_cookie_data);
70203 break;
70204 default:
70205- fscache_stat(&fscache_n_cookie_special);
70206+ fscache_stat_unchecked(&fscache_n_cookie_special);
70207 break;
70208 }
70209
70210@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
70211 } else {
70212 atomic_dec(&parent->n_children);
70213 __fscache_cookie_put(cookie);
70214- fscache_stat(&fscache_n_acquires_nobufs);
70215+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
70216 _leave(" = NULL");
70217 return NULL;
70218 }
70219@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
70220 }
70221 }
70222
70223- fscache_stat(&fscache_n_acquires_ok);
70224+ fscache_stat_unchecked(&fscache_n_acquires_ok);
70225 _leave(" = %p", cookie);
70226 return cookie;
70227 }
70228@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
70229 cache = fscache_select_cache_for_object(cookie->parent);
70230 if (!cache) {
70231 up_read(&fscache_addremove_sem);
70232- fscache_stat(&fscache_n_acquires_no_cache);
70233+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
70234 _leave(" = -ENOMEDIUM [no cache]");
70235 return -ENOMEDIUM;
70236 }
70237@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
70238 object = cache->ops->alloc_object(cache, cookie);
70239 fscache_stat_d(&fscache_n_cop_alloc_object);
70240 if (IS_ERR(object)) {
70241- fscache_stat(&fscache_n_object_no_alloc);
70242+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
70243 ret = PTR_ERR(object);
70244 goto error;
70245 }
70246
70247- fscache_stat(&fscache_n_object_alloc);
70248+ fscache_stat_unchecked(&fscache_n_object_alloc);
70249
70250- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
70251+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
70252
70253 _debug("ALLOC OBJ%x: %s {%lx}",
70254 object->debug_id, cookie->def->name, object->events);
70255@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
70256
70257 _enter("{%s}", cookie->def->name);
70258
70259- fscache_stat(&fscache_n_invalidates);
70260+ fscache_stat_unchecked(&fscache_n_invalidates);
70261
70262 /* Only permit invalidation of data files. Invalidating an index will
70263 * require the caller to release all its attachments to the tree rooted
70264@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
70265 {
70266 struct fscache_object *object;
70267
70268- fscache_stat(&fscache_n_updates);
70269+ fscache_stat_unchecked(&fscache_n_updates);
70270
70271 if (!cookie) {
70272- fscache_stat(&fscache_n_updates_null);
70273+ fscache_stat_unchecked(&fscache_n_updates_null);
70274 _leave(" [no cookie]");
70275 return;
70276 }
70277@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
70278 */
70279 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
70280 {
70281- fscache_stat(&fscache_n_relinquishes);
70282+ fscache_stat_unchecked(&fscache_n_relinquishes);
70283 if (retire)
70284- fscache_stat(&fscache_n_relinquishes_retire);
70285+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
70286
70287 if (!cookie) {
70288- fscache_stat(&fscache_n_relinquishes_null);
70289+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
70290 _leave(" [no cookie]");
70291 return;
70292 }
70293@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
70294 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
70295 goto inconsistent;
70296
70297- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
70298+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
70299
70300 __fscache_use_cookie(cookie);
70301 if (fscache_submit_op(object, op) < 0)
70302diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
70303index 7872a62..d91b19f 100644
70304--- a/fs/fscache/internal.h
70305+++ b/fs/fscache/internal.h
70306@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
70307 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
70308 extern int fscache_wait_for_operation_activation(struct fscache_object *,
70309 struct fscache_operation *,
70310- atomic_t *,
70311- atomic_t *,
70312+ atomic_unchecked_t *,
70313+ atomic_unchecked_t *,
70314 void (*)(struct fscache_operation *));
70315 extern void fscache_invalidate_writes(struct fscache_cookie *);
70316
70317@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
70318 * stats.c
70319 */
70320 #ifdef CONFIG_FSCACHE_STATS
70321-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
70322-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
70323+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
70324+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
70325
70326-extern atomic_t fscache_n_op_pend;
70327-extern atomic_t fscache_n_op_run;
70328-extern atomic_t fscache_n_op_enqueue;
70329-extern atomic_t fscache_n_op_deferred_release;
70330-extern atomic_t fscache_n_op_release;
70331-extern atomic_t fscache_n_op_gc;
70332-extern atomic_t fscache_n_op_cancelled;
70333-extern atomic_t fscache_n_op_rejected;
70334+extern atomic_unchecked_t fscache_n_op_pend;
70335+extern atomic_unchecked_t fscache_n_op_run;
70336+extern atomic_unchecked_t fscache_n_op_enqueue;
70337+extern atomic_unchecked_t fscache_n_op_deferred_release;
70338+extern atomic_unchecked_t fscache_n_op_release;
70339+extern atomic_unchecked_t fscache_n_op_gc;
70340+extern atomic_unchecked_t fscache_n_op_cancelled;
70341+extern atomic_unchecked_t fscache_n_op_rejected;
70342
70343-extern atomic_t fscache_n_attr_changed;
70344-extern atomic_t fscache_n_attr_changed_ok;
70345-extern atomic_t fscache_n_attr_changed_nobufs;
70346-extern atomic_t fscache_n_attr_changed_nomem;
70347-extern atomic_t fscache_n_attr_changed_calls;
70348+extern atomic_unchecked_t fscache_n_attr_changed;
70349+extern atomic_unchecked_t fscache_n_attr_changed_ok;
70350+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
70351+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
70352+extern atomic_unchecked_t fscache_n_attr_changed_calls;
70353
70354-extern atomic_t fscache_n_allocs;
70355-extern atomic_t fscache_n_allocs_ok;
70356-extern atomic_t fscache_n_allocs_wait;
70357-extern atomic_t fscache_n_allocs_nobufs;
70358-extern atomic_t fscache_n_allocs_intr;
70359-extern atomic_t fscache_n_allocs_object_dead;
70360-extern atomic_t fscache_n_alloc_ops;
70361-extern atomic_t fscache_n_alloc_op_waits;
70362+extern atomic_unchecked_t fscache_n_allocs;
70363+extern atomic_unchecked_t fscache_n_allocs_ok;
70364+extern atomic_unchecked_t fscache_n_allocs_wait;
70365+extern atomic_unchecked_t fscache_n_allocs_nobufs;
70366+extern atomic_unchecked_t fscache_n_allocs_intr;
70367+extern atomic_unchecked_t fscache_n_allocs_object_dead;
70368+extern atomic_unchecked_t fscache_n_alloc_ops;
70369+extern atomic_unchecked_t fscache_n_alloc_op_waits;
70370
70371-extern atomic_t fscache_n_retrievals;
70372-extern atomic_t fscache_n_retrievals_ok;
70373-extern atomic_t fscache_n_retrievals_wait;
70374-extern atomic_t fscache_n_retrievals_nodata;
70375-extern atomic_t fscache_n_retrievals_nobufs;
70376-extern atomic_t fscache_n_retrievals_intr;
70377-extern atomic_t fscache_n_retrievals_nomem;
70378-extern atomic_t fscache_n_retrievals_object_dead;
70379-extern atomic_t fscache_n_retrieval_ops;
70380-extern atomic_t fscache_n_retrieval_op_waits;
70381+extern atomic_unchecked_t fscache_n_retrievals;
70382+extern atomic_unchecked_t fscache_n_retrievals_ok;
70383+extern atomic_unchecked_t fscache_n_retrievals_wait;
70384+extern atomic_unchecked_t fscache_n_retrievals_nodata;
70385+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
70386+extern atomic_unchecked_t fscache_n_retrievals_intr;
70387+extern atomic_unchecked_t fscache_n_retrievals_nomem;
70388+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
70389+extern atomic_unchecked_t fscache_n_retrieval_ops;
70390+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
70391
70392-extern atomic_t fscache_n_stores;
70393-extern atomic_t fscache_n_stores_ok;
70394-extern atomic_t fscache_n_stores_again;
70395-extern atomic_t fscache_n_stores_nobufs;
70396-extern atomic_t fscache_n_stores_oom;
70397-extern atomic_t fscache_n_store_ops;
70398-extern atomic_t fscache_n_store_calls;
70399-extern atomic_t fscache_n_store_pages;
70400-extern atomic_t fscache_n_store_radix_deletes;
70401-extern atomic_t fscache_n_store_pages_over_limit;
70402+extern atomic_unchecked_t fscache_n_stores;
70403+extern atomic_unchecked_t fscache_n_stores_ok;
70404+extern atomic_unchecked_t fscache_n_stores_again;
70405+extern atomic_unchecked_t fscache_n_stores_nobufs;
70406+extern atomic_unchecked_t fscache_n_stores_oom;
70407+extern atomic_unchecked_t fscache_n_store_ops;
70408+extern atomic_unchecked_t fscache_n_store_calls;
70409+extern atomic_unchecked_t fscache_n_store_pages;
70410+extern atomic_unchecked_t fscache_n_store_radix_deletes;
70411+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
70412
70413-extern atomic_t fscache_n_store_vmscan_not_storing;
70414-extern atomic_t fscache_n_store_vmscan_gone;
70415-extern atomic_t fscache_n_store_vmscan_busy;
70416-extern atomic_t fscache_n_store_vmscan_cancelled;
70417-extern atomic_t fscache_n_store_vmscan_wait;
70418+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
70419+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
70420+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
70421+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
70422+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
70423
70424-extern atomic_t fscache_n_marks;
70425-extern atomic_t fscache_n_uncaches;
70426+extern atomic_unchecked_t fscache_n_marks;
70427+extern atomic_unchecked_t fscache_n_uncaches;
70428
70429-extern atomic_t fscache_n_acquires;
70430-extern atomic_t fscache_n_acquires_null;
70431-extern atomic_t fscache_n_acquires_no_cache;
70432-extern atomic_t fscache_n_acquires_ok;
70433-extern atomic_t fscache_n_acquires_nobufs;
70434-extern atomic_t fscache_n_acquires_oom;
70435+extern atomic_unchecked_t fscache_n_acquires;
70436+extern atomic_unchecked_t fscache_n_acquires_null;
70437+extern atomic_unchecked_t fscache_n_acquires_no_cache;
70438+extern atomic_unchecked_t fscache_n_acquires_ok;
70439+extern atomic_unchecked_t fscache_n_acquires_nobufs;
70440+extern atomic_unchecked_t fscache_n_acquires_oom;
70441
70442-extern atomic_t fscache_n_invalidates;
70443-extern atomic_t fscache_n_invalidates_run;
70444+extern atomic_unchecked_t fscache_n_invalidates;
70445+extern atomic_unchecked_t fscache_n_invalidates_run;
70446
70447-extern atomic_t fscache_n_updates;
70448-extern atomic_t fscache_n_updates_null;
70449-extern atomic_t fscache_n_updates_run;
70450+extern atomic_unchecked_t fscache_n_updates;
70451+extern atomic_unchecked_t fscache_n_updates_null;
70452+extern atomic_unchecked_t fscache_n_updates_run;
70453
70454-extern atomic_t fscache_n_relinquishes;
70455-extern atomic_t fscache_n_relinquishes_null;
70456-extern atomic_t fscache_n_relinquishes_waitcrt;
70457-extern atomic_t fscache_n_relinquishes_retire;
70458+extern atomic_unchecked_t fscache_n_relinquishes;
70459+extern atomic_unchecked_t fscache_n_relinquishes_null;
70460+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
70461+extern atomic_unchecked_t fscache_n_relinquishes_retire;
70462
70463-extern atomic_t fscache_n_cookie_index;
70464-extern atomic_t fscache_n_cookie_data;
70465-extern atomic_t fscache_n_cookie_special;
70466+extern atomic_unchecked_t fscache_n_cookie_index;
70467+extern atomic_unchecked_t fscache_n_cookie_data;
70468+extern atomic_unchecked_t fscache_n_cookie_special;
70469
70470-extern atomic_t fscache_n_object_alloc;
70471-extern atomic_t fscache_n_object_no_alloc;
70472-extern atomic_t fscache_n_object_lookups;
70473-extern atomic_t fscache_n_object_lookups_negative;
70474-extern atomic_t fscache_n_object_lookups_positive;
70475-extern atomic_t fscache_n_object_lookups_timed_out;
70476-extern atomic_t fscache_n_object_created;
70477-extern atomic_t fscache_n_object_avail;
70478-extern atomic_t fscache_n_object_dead;
70479+extern atomic_unchecked_t fscache_n_object_alloc;
70480+extern atomic_unchecked_t fscache_n_object_no_alloc;
70481+extern atomic_unchecked_t fscache_n_object_lookups;
70482+extern atomic_unchecked_t fscache_n_object_lookups_negative;
70483+extern atomic_unchecked_t fscache_n_object_lookups_positive;
70484+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
70485+extern atomic_unchecked_t fscache_n_object_created;
70486+extern atomic_unchecked_t fscache_n_object_avail;
70487+extern atomic_unchecked_t fscache_n_object_dead;
70488
70489-extern atomic_t fscache_n_checkaux_none;
70490-extern atomic_t fscache_n_checkaux_okay;
70491-extern atomic_t fscache_n_checkaux_update;
70492-extern atomic_t fscache_n_checkaux_obsolete;
70493+extern atomic_unchecked_t fscache_n_checkaux_none;
70494+extern atomic_unchecked_t fscache_n_checkaux_okay;
70495+extern atomic_unchecked_t fscache_n_checkaux_update;
70496+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
70497
70498 extern atomic_t fscache_n_cop_alloc_object;
70499 extern atomic_t fscache_n_cop_lookup_object;
70500@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
70501 atomic_inc(stat);
70502 }
70503
70504+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
70505+{
70506+ atomic_inc_unchecked(stat);
70507+}
70508+
70509 static inline void fscache_stat_d(atomic_t *stat)
70510 {
70511 atomic_dec(stat);
70512@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
70513
70514 #define __fscache_stat(stat) (NULL)
70515 #define fscache_stat(stat) do {} while (0)
70516+#define fscache_stat_unchecked(stat) do {} while (0)
70517 #define fscache_stat_d(stat) do {} while (0)
70518 #endif
70519
70520diff --git a/fs/fscache/object.c b/fs/fscache/object.c
70521index da032da..0076ce7 100644
70522--- a/fs/fscache/object.c
70523+++ b/fs/fscache/object.c
70524@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
70525 _debug("LOOKUP \"%s\" in \"%s\"",
70526 cookie->def->name, object->cache->tag->name);
70527
70528- fscache_stat(&fscache_n_object_lookups);
70529+ fscache_stat_unchecked(&fscache_n_object_lookups);
70530 fscache_stat(&fscache_n_cop_lookup_object);
70531 ret = object->cache->ops->lookup_object(object);
70532 fscache_stat_d(&fscache_n_cop_lookup_object);
70533@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
70534 if (ret == -ETIMEDOUT) {
70535 /* probably stuck behind another object, so move this one to
70536 * the back of the queue */
70537- fscache_stat(&fscache_n_object_lookups_timed_out);
70538+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
70539 _leave(" [timeout]");
70540 return NO_TRANSIT;
70541 }
70542@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
70543 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
70544
70545 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
70546- fscache_stat(&fscache_n_object_lookups_negative);
70547+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
70548
70549 /* Allow write requests to begin stacking up and read requests to begin
70550 * returning ENODATA.
70551@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
70552 /* if we were still looking up, then we must have a positive lookup
70553 * result, in which case there may be data available */
70554 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
70555- fscache_stat(&fscache_n_object_lookups_positive);
70556+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
70557
70558 /* We do (presumably) have data */
70559 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
70560@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
70561 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
70562 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
70563 } else {
70564- fscache_stat(&fscache_n_object_created);
70565+ fscache_stat_unchecked(&fscache_n_object_created);
70566 }
70567
70568 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
70569@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
70570 fscache_stat_d(&fscache_n_cop_lookup_complete);
70571
70572 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
70573- fscache_stat(&fscache_n_object_avail);
70574+ fscache_stat_unchecked(&fscache_n_object_avail);
70575
70576 _leave("");
70577 return transit_to(JUMPSTART_DEPS);
70578@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
70579
70580 /* this just shifts the object release to the work processor */
70581 fscache_put_object(object);
70582- fscache_stat(&fscache_n_object_dead);
70583+ fscache_stat_unchecked(&fscache_n_object_dead);
70584
70585 _leave("");
70586 return transit_to(OBJECT_DEAD);
70587@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
70588 enum fscache_checkaux result;
70589
70590 if (!object->cookie->def->check_aux) {
70591- fscache_stat(&fscache_n_checkaux_none);
70592+ fscache_stat_unchecked(&fscache_n_checkaux_none);
70593 return FSCACHE_CHECKAUX_OKAY;
70594 }
70595
70596@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
70597 switch (result) {
70598 /* entry okay as is */
70599 case FSCACHE_CHECKAUX_OKAY:
70600- fscache_stat(&fscache_n_checkaux_okay);
70601+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
70602 break;
70603
70604 /* entry requires update */
70605 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
70606- fscache_stat(&fscache_n_checkaux_update);
70607+ fscache_stat_unchecked(&fscache_n_checkaux_update);
70608 break;
70609
70610 /* entry requires deletion */
70611 case FSCACHE_CHECKAUX_OBSOLETE:
70612- fscache_stat(&fscache_n_checkaux_obsolete);
70613+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
70614 break;
70615
70616 default:
70617@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
70618 {
70619 const struct fscache_state *s;
70620
70621- fscache_stat(&fscache_n_invalidates_run);
70622+ fscache_stat_unchecked(&fscache_n_invalidates_run);
70623 fscache_stat(&fscache_n_cop_invalidate_object);
70624 s = _fscache_invalidate_object(object, event);
70625 fscache_stat_d(&fscache_n_cop_invalidate_object);
70626@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
70627 {
70628 _enter("{OBJ%x},%d", object->debug_id, event);
70629
70630- fscache_stat(&fscache_n_updates_run);
70631+ fscache_stat_unchecked(&fscache_n_updates_run);
70632 fscache_stat(&fscache_n_cop_update_object);
70633 object->cache->ops->update_object(object);
70634 fscache_stat_d(&fscache_n_cop_update_object);
70635diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
70636index e7b87a0..a85d47a 100644
70637--- a/fs/fscache/operation.c
70638+++ b/fs/fscache/operation.c
70639@@ -17,7 +17,7 @@
70640 #include <linux/slab.h>
70641 #include "internal.h"
70642
70643-atomic_t fscache_op_debug_id;
70644+atomic_unchecked_t fscache_op_debug_id;
70645 EXPORT_SYMBOL(fscache_op_debug_id);
70646
70647 /**
70648@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
70649 ASSERTCMP(atomic_read(&op->usage), >, 0);
70650 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
70651
70652- fscache_stat(&fscache_n_op_enqueue);
70653+ fscache_stat_unchecked(&fscache_n_op_enqueue);
70654 switch (op->flags & FSCACHE_OP_TYPE) {
70655 case FSCACHE_OP_ASYNC:
70656 _debug("queue async");
70657@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
70658 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
70659 if (op->processor)
70660 fscache_enqueue_operation(op);
70661- fscache_stat(&fscache_n_op_run);
70662+ fscache_stat_unchecked(&fscache_n_op_run);
70663 }
70664
70665 /*
70666@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
70667 if (object->n_in_progress > 0) {
70668 atomic_inc(&op->usage);
70669 list_add_tail(&op->pend_link, &object->pending_ops);
70670- fscache_stat(&fscache_n_op_pend);
70671+ fscache_stat_unchecked(&fscache_n_op_pend);
70672 } else if (!list_empty(&object->pending_ops)) {
70673 atomic_inc(&op->usage);
70674 list_add_tail(&op->pend_link, &object->pending_ops);
70675- fscache_stat(&fscache_n_op_pend);
70676+ fscache_stat_unchecked(&fscache_n_op_pend);
70677 fscache_start_operations(object);
70678 } else {
70679 ASSERTCMP(object->n_in_progress, ==, 0);
70680@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
70681 object->n_exclusive++; /* reads and writes must wait */
70682 atomic_inc(&op->usage);
70683 list_add_tail(&op->pend_link, &object->pending_ops);
70684- fscache_stat(&fscache_n_op_pend);
70685+ fscache_stat_unchecked(&fscache_n_op_pend);
70686 ret = 0;
70687 } else {
70688 /* If we're in any other state, there must have been an I/O
70689@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
70690 if (object->n_exclusive > 0) {
70691 atomic_inc(&op->usage);
70692 list_add_tail(&op->pend_link, &object->pending_ops);
70693- fscache_stat(&fscache_n_op_pend);
70694+ fscache_stat_unchecked(&fscache_n_op_pend);
70695 } else if (!list_empty(&object->pending_ops)) {
70696 atomic_inc(&op->usage);
70697 list_add_tail(&op->pend_link, &object->pending_ops);
70698- fscache_stat(&fscache_n_op_pend);
70699+ fscache_stat_unchecked(&fscache_n_op_pend);
70700 fscache_start_operations(object);
70701 } else {
70702 ASSERTCMP(object->n_exclusive, ==, 0);
70703@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
70704 object->n_ops++;
70705 atomic_inc(&op->usage);
70706 list_add_tail(&op->pend_link, &object->pending_ops);
70707- fscache_stat(&fscache_n_op_pend);
70708+ fscache_stat_unchecked(&fscache_n_op_pend);
70709 ret = 0;
70710 } else if (fscache_object_is_dying(object)) {
70711- fscache_stat(&fscache_n_op_rejected);
70712+ fscache_stat_unchecked(&fscache_n_op_rejected);
70713 op->state = FSCACHE_OP_ST_CANCELLED;
70714 ret = -ENOBUFS;
70715 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
70716@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
70717 ret = -EBUSY;
70718 if (op->state == FSCACHE_OP_ST_PENDING) {
70719 ASSERT(!list_empty(&op->pend_link));
70720- fscache_stat(&fscache_n_op_cancelled);
70721+ fscache_stat_unchecked(&fscache_n_op_cancelled);
70722 list_del_init(&op->pend_link);
70723 if (do_cancel)
70724 do_cancel(op);
70725@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
70726 while (!list_empty(&object->pending_ops)) {
70727 op = list_entry(object->pending_ops.next,
70728 struct fscache_operation, pend_link);
70729- fscache_stat(&fscache_n_op_cancelled);
70730+ fscache_stat_unchecked(&fscache_n_op_cancelled);
70731 list_del_init(&op->pend_link);
70732
70733 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
70734@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
70735 op->state, ==, FSCACHE_OP_ST_CANCELLED);
70736 op->state = FSCACHE_OP_ST_DEAD;
70737
70738- fscache_stat(&fscache_n_op_release);
70739+ fscache_stat_unchecked(&fscache_n_op_release);
70740
70741 if (op->release) {
70742 op->release(op);
70743@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
70744 * lock, and defer it otherwise */
70745 if (!spin_trylock(&object->lock)) {
70746 _debug("defer put");
70747- fscache_stat(&fscache_n_op_deferred_release);
70748+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
70749
70750 cache = object->cache;
70751 spin_lock(&cache->op_gc_list_lock);
70752@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
70753
70754 _debug("GC DEFERRED REL OBJ%x OP%x",
70755 object->debug_id, op->debug_id);
70756- fscache_stat(&fscache_n_op_gc);
70757+ fscache_stat_unchecked(&fscache_n_op_gc);
70758
70759 ASSERTCMP(atomic_read(&op->usage), ==, 0);
70760 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
70761diff --git a/fs/fscache/page.c b/fs/fscache/page.c
70762index de33b3f..8be4d29 100644
70763--- a/fs/fscache/page.c
70764+++ b/fs/fscache/page.c
70765@@ -74,7 +74,7 @@ try_again:
70766 val = radix_tree_lookup(&cookie->stores, page->index);
70767 if (!val) {
70768 rcu_read_unlock();
70769- fscache_stat(&fscache_n_store_vmscan_not_storing);
70770+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
70771 __fscache_uncache_page(cookie, page);
70772 return true;
70773 }
70774@@ -104,11 +104,11 @@ try_again:
70775 spin_unlock(&cookie->stores_lock);
70776
70777 if (xpage) {
70778- fscache_stat(&fscache_n_store_vmscan_cancelled);
70779- fscache_stat(&fscache_n_store_radix_deletes);
70780+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
70781+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
70782 ASSERTCMP(xpage, ==, page);
70783 } else {
70784- fscache_stat(&fscache_n_store_vmscan_gone);
70785+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
70786 }
70787
70788 wake_up_bit(&cookie->flags, 0);
70789@@ -123,11 +123,11 @@ page_busy:
70790 * sleeping on memory allocation, so we may need to impose a timeout
70791 * too. */
70792 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
70793- fscache_stat(&fscache_n_store_vmscan_busy);
70794+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
70795 return false;
70796 }
70797
70798- fscache_stat(&fscache_n_store_vmscan_wait);
70799+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
70800 if (!release_page_wait_timeout(cookie, page))
70801 _debug("fscache writeout timeout page: %p{%lx}",
70802 page, page->index);
70803@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
70804 FSCACHE_COOKIE_STORING_TAG);
70805 if (!radix_tree_tag_get(&cookie->stores, page->index,
70806 FSCACHE_COOKIE_PENDING_TAG)) {
70807- fscache_stat(&fscache_n_store_radix_deletes);
70808+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
70809 xpage = radix_tree_delete(&cookie->stores, page->index);
70810 }
70811 spin_unlock(&cookie->stores_lock);
70812@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
70813
70814 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
70815
70816- fscache_stat(&fscache_n_attr_changed_calls);
70817+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
70818
70819 if (fscache_object_is_active(object)) {
70820 fscache_stat(&fscache_n_cop_attr_changed);
70821@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
70822
70823 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
70824
70825- fscache_stat(&fscache_n_attr_changed);
70826+ fscache_stat_unchecked(&fscache_n_attr_changed);
70827
70828 op = kzalloc(sizeof(*op), GFP_KERNEL);
70829 if (!op) {
70830- fscache_stat(&fscache_n_attr_changed_nomem);
70831+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
70832 _leave(" = -ENOMEM");
70833 return -ENOMEM;
70834 }
70835@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
70836 if (fscache_submit_exclusive_op(object, op) < 0)
70837 goto nobufs_dec;
70838 spin_unlock(&cookie->lock);
70839- fscache_stat(&fscache_n_attr_changed_ok);
70840+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
70841 fscache_put_operation(op);
70842 _leave(" = 0");
70843 return 0;
70844@@ -242,7 +242,7 @@ nobufs:
70845 kfree(op);
70846 if (wake_cookie)
70847 __fscache_wake_unused_cookie(cookie);
70848- fscache_stat(&fscache_n_attr_changed_nobufs);
70849+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
70850 _leave(" = %d", -ENOBUFS);
70851 return -ENOBUFS;
70852 }
70853@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
70854 /* allocate a retrieval operation and attempt to submit it */
70855 op = kzalloc(sizeof(*op), GFP_NOIO);
70856 if (!op) {
70857- fscache_stat(&fscache_n_retrievals_nomem);
70858+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
70859 return NULL;
70860 }
70861
70862@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
70863 return 0;
70864 }
70865
70866- fscache_stat(&fscache_n_retrievals_wait);
70867+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
70868
70869 jif = jiffies;
70870 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
70871 TASK_INTERRUPTIBLE) != 0) {
70872- fscache_stat(&fscache_n_retrievals_intr);
70873+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
70874 _leave(" = -ERESTARTSYS");
70875 return -ERESTARTSYS;
70876 }
70877@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
70878 */
70879 int fscache_wait_for_operation_activation(struct fscache_object *object,
70880 struct fscache_operation *op,
70881- atomic_t *stat_op_waits,
70882- atomic_t *stat_object_dead,
70883+ atomic_unchecked_t *stat_op_waits,
70884+ atomic_unchecked_t *stat_object_dead,
70885 void (*do_cancel)(struct fscache_operation *))
70886 {
70887 int ret;
70888@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
70889
70890 _debug(">>> WT");
70891 if (stat_op_waits)
70892- fscache_stat(stat_op_waits);
70893+ fscache_stat_unchecked(stat_op_waits);
70894 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
70895 TASK_INTERRUPTIBLE) != 0) {
70896 ret = fscache_cancel_op(op, do_cancel);
70897@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
70898 check_if_dead:
70899 if (op->state == FSCACHE_OP_ST_CANCELLED) {
70900 if (stat_object_dead)
70901- fscache_stat(stat_object_dead);
70902+ fscache_stat_unchecked(stat_object_dead);
70903 _leave(" = -ENOBUFS [cancelled]");
70904 return -ENOBUFS;
70905 }
70906@@ -381,7 +381,7 @@ check_if_dead:
70907 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
70908 fscache_cancel_op(op, do_cancel);
70909 if (stat_object_dead)
70910- fscache_stat(stat_object_dead);
70911+ fscache_stat_unchecked(stat_object_dead);
70912 return -ENOBUFS;
70913 }
70914 return 0;
70915@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
70916
70917 _enter("%p,%p,,,", cookie, page);
70918
70919- fscache_stat(&fscache_n_retrievals);
70920+ fscache_stat_unchecked(&fscache_n_retrievals);
70921
70922 if (hlist_empty(&cookie->backing_objects))
70923 goto nobufs;
70924@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
70925 goto nobufs_unlock_dec;
70926 spin_unlock(&cookie->lock);
70927
70928- fscache_stat(&fscache_n_retrieval_ops);
70929+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
70930
70931 /* pin the netfs read context in case we need to do the actual netfs
70932 * read because we've encountered a cache read failure */
70933@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
70934
70935 error:
70936 if (ret == -ENOMEM)
70937- fscache_stat(&fscache_n_retrievals_nomem);
70938+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
70939 else if (ret == -ERESTARTSYS)
70940- fscache_stat(&fscache_n_retrievals_intr);
70941+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
70942 else if (ret == -ENODATA)
70943- fscache_stat(&fscache_n_retrievals_nodata);
70944+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
70945 else if (ret < 0)
70946- fscache_stat(&fscache_n_retrievals_nobufs);
70947+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
70948 else
70949- fscache_stat(&fscache_n_retrievals_ok);
70950+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
70951
70952 fscache_put_retrieval(op);
70953 _leave(" = %d", ret);
70954@@ -505,7 +505,7 @@ nobufs_unlock:
70955 __fscache_wake_unused_cookie(cookie);
70956 kfree(op);
70957 nobufs:
70958- fscache_stat(&fscache_n_retrievals_nobufs);
70959+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
70960 _leave(" = -ENOBUFS");
70961 return -ENOBUFS;
70962 }
70963@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
70964
70965 _enter("%p,,%d,,,", cookie, *nr_pages);
70966
70967- fscache_stat(&fscache_n_retrievals);
70968+ fscache_stat_unchecked(&fscache_n_retrievals);
70969
70970 if (hlist_empty(&cookie->backing_objects))
70971 goto nobufs;
70972@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
70973 goto nobufs_unlock_dec;
70974 spin_unlock(&cookie->lock);
70975
70976- fscache_stat(&fscache_n_retrieval_ops);
70977+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
70978
70979 /* pin the netfs read context in case we need to do the actual netfs
70980 * read because we've encountered a cache read failure */
70981@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
70982
70983 error:
70984 if (ret == -ENOMEM)
70985- fscache_stat(&fscache_n_retrievals_nomem);
70986+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
70987 else if (ret == -ERESTARTSYS)
70988- fscache_stat(&fscache_n_retrievals_intr);
70989+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
70990 else if (ret == -ENODATA)
70991- fscache_stat(&fscache_n_retrievals_nodata);
70992+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
70993 else if (ret < 0)
70994- fscache_stat(&fscache_n_retrievals_nobufs);
70995+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
70996 else
70997- fscache_stat(&fscache_n_retrievals_ok);
70998+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
70999
71000 fscache_put_retrieval(op);
71001 _leave(" = %d", ret);
71002@@ -636,7 +636,7 @@ nobufs_unlock:
71003 if (wake_cookie)
71004 __fscache_wake_unused_cookie(cookie);
71005 nobufs:
71006- fscache_stat(&fscache_n_retrievals_nobufs);
71007+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
71008 _leave(" = -ENOBUFS");
71009 return -ENOBUFS;
71010 }
71011@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
71012
71013 _enter("%p,%p,,,", cookie, page);
71014
71015- fscache_stat(&fscache_n_allocs);
71016+ fscache_stat_unchecked(&fscache_n_allocs);
71017
71018 if (hlist_empty(&cookie->backing_objects))
71019 goto nobufs;
71020@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
71021 goto nobufs_unlock_dec;
71022 spin_unlock(&cookie->lock);
71023
71024- fscache_stat(&fscache_n_alloc_ops);
71025+ fscache_stat_unchecked(&fscache_n_alloc_ops);
71026
71027 ret = fscache_wait_for_operation_activation(
71028 object, &op->op,
71029@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
71030
71031 error:
71032 if (ret == -ERESTARTSYS)
71033- fscache_stat(&fscache_n_allocs_intr);
71034+ fscache_stat_unchecked(&fscache_n_allocs_intr);
71035 else if (ret < 0)
71036- fscache_stat(&fscache_n_allocs_nobufs);
71037+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
71038 else
71039- fscache_stat(&fscache_n_allocs_ok);
71040+ fscache_stat_unchecked(&fscache_n_allocs_ok);
71041
71042 fscache_put_retrieval(op);
71043 _leave(" = %d", ret);
71044@@ -730,7 +730,7 @@ nobufs_unlock:
71045 if (wake_cookie)
71046 __fscache_wake_unused_cookie(cookie);
71047 nobufs:
71048- fscache_stat(&fscache_n_allocs_nobufs);
71049+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
71050 _leave(" = -ENOBUFS");
71051 return -ENOBUFS;
71052 }
71053@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
71054
71055 spin_lock(&cookie->stores_lock);
71056
71057- fscache_stat(&fscache_n_store_calls);
71058+ fscache_stat_unchecked(&fscache_n_store_calls);
71059
71060 /* find a page to store */
71061 page = NULL;
71062@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
71063 page = results[0];
71064 _debug("gang %d [%lx]", n, page->index);
71065 if (page->index > op->store_limit) {
71066- fscache_stat(&fscache_n_store_pages_over_limit);
71067+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
71068 goto superseded;
71069 }
71070
71071@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
71072 spin_unlock(&cookie->stores_lock);
71073 spin_unlock(&object->lock);
71074
71075- fscache_stat(&fscache_n_store_pages);
71076+ fscache_stat_unchecked(&fscache_n_store_pages);
71077 fscache_stat(&fscache_n_cop_write_page);
71078 ret = object->cache->ops->write_page(op, page);
71079 fscache_stat_d(&fscache_n_cop_write_page);
71080@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
71081 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
71082 ASSERT(PageFsCache(page));
71083
71084- fscache_stat(&fscache_n_stores);
71085+ fscache_stat_unchecked(&fscache_n_stores);
71086
71087 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
71088 _leave(" = -ENOBUFS [invalidating]");
71089@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
71090 spin_unlock(&cookie->stores_lock);
71091 spin_unlock(&object->lock);
71092
71093- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
71094+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
71095 op->store_limit = object->store_limit;
71096
71097 __fscache_use_cookie(cookie);
71098@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
71099
71100 spin_unlock(&cookie->lock);
71101 radix_tree_preload_end();
71102- fscache_stat(&fscache_n_store_ops);
71103- fscache_stat(&fscache_n_stores_ok);
71104+ fscache_stat_unchecked(&fscache_n_store_ops);
71105+ fscache_stat_unchecked(&fscache_n_stores_ok);
71106
71107 /* the work queue now carries its own ref on the object */
71108 fscache_put_operation(&op->op);
71109@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
71110 return 0;
71111
71112 already_queued:
71113- fscache_stat(&fscache_n_stores_again);
71114+ fscache_stat_unchecked(&fscache_n_stores_again);
71115 already_pending:
71116 spin_unlock(&cookie->stores_lock);
71117 spin_unlock(&object->lock);
71118 spin_unlock(&cookie->lock);
71119 radix_tree_preload_end();
71120 kfree(op);
71121- fscache_stat(&fscache_n_stores_ok);
71122+ fscache_stat_unchecked(&fscache_n_stores_ok);
71123 _leave(" = 0");
71124 return 0;
71125
71126@@ -1039,14 +1039,14 @@ nobufs:
71127 kfree(op);
71128 if (wake_cookie)
71129 __fscache_wake_unused_cookie(cookie);
71130- fscache_stat(&fscache_n_stores_nobufs);
71131+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
71132 _leave(" = -ENOBUFS");
71133 return -ENOBUFS;
71134
71135 nomem_free:
71136 kfree(op);
71137 nomem:
71138- fscache_stat(&fscache_n_stores_oom);
71139+ fscache_stat_unchecked(&fscache_n_stores_oom);
71140 _leave(" = -ENOMEM");
71141 return -ENOMEM;
71142 }
71143@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
71144 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
71145 ASSERTCMP(page, !=, NULL);
71146
71147- fscache_stat(&fscache_n_uncaches);
71148+ fscache_stat_unchecked(&fscache_n_uncaches);
71149
71150 /* cache withdrawal may beat us to it */
71151 if (!PageFsCache(page))
71152@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
71153 struct fscache_cookie *cookie = op->op.object->cookie;
71154
71155 #ifdef CONFIG_FSCACHE_STATS
71156- atomic_inc(&fscache_n_marks);
71157+ atomic_inc_unchecked(&fscache_n_marks);
71158 #endif
71159
71160 _debug("- mark %p{%lx}", page, page->index);
71161diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
71162index 40d13c7..ddf52b9 100644
71163--- a/fs/fscache/stats.c
71164+++ b/fs/fscache/stats.c
71165@@ -18,99 +18,99 @@
71166 /*
71167 * operation counters
71168 */
71169-atomic_t fscache_n_op_pend;
71170-atomic_t fscache_n_op_run;
71171-atomic_t fscache_n_op_enqueue;
71172-atomic_t fscache_n_op_requeue;
71173-atomic_t fscache_n_op_deferred_release;
71174-atomic_t fscache_n_op_release;
71175-atomic_t fscache_n_op_gc;
71176-atomic_t fscache_n_op_cancelled;
71177-atomic_t fscache_n_op_rejected;
71178+atomic_unchecked_t fscache_n_op_pend;
71179+atomic_unchecked_t fscache_n_op_run;
71180+atomic_unchecked_t fscache_n_op_enqueue;
71181+atomic_unchecked_t fscache_n_op_requeue;
71182+atomic_unchecked_t fscache_n_op_deferred_release;
71183+atomic_unchecked_t fscache_n_op_release;
71184+atomic_unchecked_t fscache_n_op_gc;
71185+atomic_unchecked_t fscache_n_op_cancelled;
71186+atomic_unchecked_t fscache_n_op_rejected;
71187
71188-atomic_t fscache_n_attr_changed;
71189-atomic_t fscache_n_attr_changed_ok;
71190-atomic_t fscache_n_attr_changed_nobufs;
71191-atomic_t fscache_n_attr_changed_nomem;
71192-atomic_t fscache_n_attr_changed_calls;
71193+atomic_unchecked_t fscache_n_attr_changed;
71194+atomic_unchecked_t fscache_n_attr_changed_ok;
71195+atomic_unchecked_t fscache_n_attr_changed_nobufs;
71196+atomic_unchecked_t fscache_n_attr_changed_nomem;
71197+atomic_unchecked_t fscache_n_attr_changed_calls;
71198
71199-atomic_t fscache_n_allocs;
71200-atomic_t fscache_n_allocs_ok;
71201-atomic_t fscache_n_allocs_wait;
71202-atomic_t fscache_n_allocs_nobufs;
71203-atomic_t fscache_n_allocs_intr;
71204-atomic_t fscache_n_allocs_object_dead;
71205-atomic_t fscache_n_alloc_ops;
71206-atomic_t fscache_n_alloc_op_waits;
71207+atomic_unchecked_t fscache_n_allocs;
71208+atomic_unchecked_t fscache_n_allocs_ok;
71209+atomic_unchecked_t fscache_n_allocs_wait;
71210+atomic_unchecked_t fscache_n_allocs_nobufs;
71211+atomic_unchecked_t fscache_n_allocs_intr;
71212+atomic_unchecked_t fscache_n_allocs_object_dead;
71213+atomic_unchecked_t fscache_n_alloc_ops;
71214+atomic_unchecked_t fscache_n_alloc_op_waits;
71215
71216-atomic_t fscache_n_retrievals;
71217-atomic_t fscache_n_retrievals_ok;
71218-atomic_t fscache_n_retrievals_wait;
71219-atomic_t fscache_n_retrievals_nodata;
71220-atomic_t fscache_n_retrievals_nobufs;
71221-atomic_t fscache_n_retrievals_intr;
71222-atomic_t fscache_n_retrievals_nomem;
71223-atomic_t fscache_n_retrievals_object_dead;
71224-atomic_t fscache_n_retrieval_ops;
71225-atomic_t fscache_n_retrieval_op_waits;
71226+atomic_unchecked_t fscache_n_retrievals;
71227+atomic_unchecked_t fscache_n_retrievals_ok;
71228+atomic_unchecked_t fscache_n_retrievals_wait;
71229+atomic_unchecked_t fscache_n_retrievals_nodata;
71230+atomic_unchecked_t fscache_n_retrievals_nobufs;
71231+atomic_unchecked_t fscache_n_retrievals_intr;
71232+atomic_unchecked_t fscache_n_retrievals_nomem;
71233+atomic_unchecked_t fscache_n_retrievals_object_dead;
71234+atomic_unchecked_t fscache_n_retrieval_ops;
71235+atomic_unchecked_t fscache_n_retrieval_op_waits;
71236
71237-atomic_t fscache_n_stores;
71238-atomic_t fscache_n_stores_ok;
71239-atomic_t fscache_n_stores_again;
71240-atomic_t fscache_n_stores_nobufs;
71241-atomic_t fscache_n_stores_oom;
71242-atomic_t fscache_n_store_ops;
71243-atomic_t fscache_n_store_calls;
71244-atomic_t fscache_n_store_pages;
71245-atomic_t fscache_n_store_radix_deletes;
71246-atomic_t fscache_n_store_pages_over_limit;
71247+atomic_unchecked_t fscache_n_stores;
71248+atomic_unchecked_t fscache_n_stores_ok;
71249+atomic_unchecked_t fscache_n_stores_again;
71250+atomic_unchecked_t fscache_n_stores_nobufs;
71251+atomic_unchecked_t fscache_n_stores_oom;
71252+atomic_unchecked_t fscache_n_store_ops;
71253+atomic_unchecked_t fscache_n_store_calls;
71254+atomic_unchecked_t fscache_n_store_pages;
71255+atomic_unchecked_t fscache_n_store_radix_deletes;
71256+atomic_unchecked_t fscache_n_store_pages_over_limit;
71257
71258-atomic_t fscache_n_store_vmscan_not_storing;
71259-atomic_t fscache_n_store_vmscan_gone;
71260-atomic_t fscache_n_store_vmscan_busy;
71261-atomic_t fscache_n_store_vmscan_cancelled;
71262-atomic_t fscache_n_store_vmscan_wait;
71263+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
71264+atomic_unchecked_t fscache_n_store_vmscan_gone;
71265+atomic_unchecked_t fscache_n_store_vmscan_busy;
71266+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
71267+atomic_unchecked_t fscache_n_store_vmscan_wait;
71268
71269-atomic_t fscache_n_marks;
71270-atomic_t fscache_n_uncaches;
71271+atomic_unchecked_t fscache_n_marks;
71272+atomic_unchecked_t fscache_n_uncaches;
71273
71274-atomic_t fscache_n_acquires;
71275-atomic_t fscache_n_acquires_null;
71276-atomic_t fscache_n_acquires_no_cache;
71277-atomic_t fscache_n_acquires_ok;
71278-atomic_t fscache_n_acquires_nobufs;
71279-atomic_t fscache_n_acquires_oom;
71280+atomic_unchecked_t fscache_n_acquires;
71281+atomic_unchecked_t fscache_n_acquires_null;
71282+atomic_unchecked_t fscache_n_acquires_no_cache;
71283+atomic_unchecked_t fscache_n_acquires_ok;
71284+atomic_unchecked_t fscache_n_acquires_nobufs;
71285+atomic_unchecked_t fscache_n_acquires_oom;
71286
71287-atomic_t fscache_n_invalidates;
71288-atomic_t fscache_n_invalidates_run;
71289+atomic_unchecked_t fscache_n_invalidates;
71290+atomic_unchecked_t fscache_n_invalidates_run;
71291
71292-atomic_t fscache_n_updates;
71293-atomic_t fscache_n_updates_null;
71294-atomic_t fscache_n_updates_run;
71295+atomic_unchecked_t fscache_n_updates;
71296+atomic_unchecked_t fscache_n_updates_null;
71297+atomic_unchecked_t fscache_n_updates_run;
71298
71299-atomic_t fscache_n_relinquishes;
71300-atomic_t fscache_n_relinquishes_null;
71301-atomic_t fscache_n_relinquishes_waitcrt;
71302-atomic_t fscache_n_relinquishes_retire;
71303+atomic_unchecked_t fscache_n_relinquishes;
71304+atomic_unchecked_t fscache_n_relinquishes_null;
71305+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
71306+atomic_unchecked_t fscache_n_relinquishes_retire;
71307
71308-atomic_t fscache_n_cookie_index;
71309-atomic_t fscache_n_cookie_data;
71310-atomic_t fscache_n_cookie_special;
71311+atomic_unchecked_t fscache_n_cookie_index;
71312+atomic_unchecked_t fscache_n_cookie_data;
71313+atomic_unchecked_t fscache_n_cookie_special;
71314
71315-atomic_t fscache_n_object_alloc;
71316-atomic_t fscache_n_object_no_alloc;
71317-atomic_t fscache_n_object_lookups;
71318-atomic_t fscache_n_object_lookups_negative;
71319-atomic_t fscache_n_object_lookups_positive;
71320-atomic_t fscache_n_object_lookups_timed_out;
71321-atomic_t fscache_n_object_created;
71322-atomic_t fscache_n_object_avail;
71323-atomic_t fscache_n_object_dead;
71324+atomic_unchecked_t fscache_n_object_alloc;
71325+atomic_unchecked_t fscache_n_object_no_alloc;
71326+atomic_unchecked_t fscache_n_object_lookups;
71327+atomic_unchecked_t fscache_n_object_lookups_negative;
71328+atomic_unchecked_t fscache_n_object_lookups_positive;
71329+atomic_unchecked_t fscache_n_object_lookups_timed_out;
71330+atomic_unchecked_t fscache_n_object_created;
71331+atomic_unchecked_t fscache_n_object_avail;
71332+atomic_unchecked_t fscache_n_object_dead;
71333
71334-atomic_t fscache_n_checkaux_none;
71335-atomic_t fscache_n_checkaux_okay;
71336-atomic_t fscache_n_checkaux_update;
71337-atomic_t fscache_n_checkaux_obsolete;
71338+atomic_unchecked_t fscache_n_checkaux_none;
71339+atomic_unchecked_t fscache_n_checkaux_okay;
71340+atomic_unchecked_t fscache_n_checkaux_update;
71341+atomic_unchecked_t fscache_n_checkaux_obsolete;
71342
71343 atomic_t fscache_n_cop_alloc_object;
71344 atomic_t fscache_n_cop_lookup_object;
71345@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
71346 seq_puts(m, "FS-Cache statistics\n");
71347
71348 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
71349- atomic_read(&fscache_n_cookie_index),
71350- atomic_read(&fscache_n_cookie_data),
71351- atomic_read(&fscache_n_cookie_special));
71352+ atomic_read_unchecked(&fscache_n_cookie_index),
71353+ atomic_read_unchecked(&fscache_n_cookie_data),
71354+ atomic_read_unchecked(&fscache_n_cookie_special));
71355
71356 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
71357- atomic_read(&fscache_n_object_alloc),
71358- atomic_read(&fscache_n_object_no_alloc),
71359- atomic_read(&fscache_n_object_avail),
71360- atomic_read(&fscache_n_object_dead));
71361+ atomic_read_unchecked(&fscache_n_object_alloc),
71362+ atomic_read_unchecked(&fscache_n_object_no_alloc),
71363+ atomic_read_unchecked(&fscache_n_object_avail),
71364+ atomic_read_unchecked(&fscache_n_object_dead));
71365 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
71366- atomic_read(&fscache_n_checkaux_none),
71367- atomic_read(&fscache_n_checkaux_okay),
71368- atomic_read(&fscache_n_checkaux_update),
71369- atomic_read(&fscache_n_checkaux_obsolete));
71370+ atomic_read_unchecked(&fscache_n_checkaux_none),
71371+ atomic_read_unchecked(&fscache_n_checkaux_okay),
71372+ atomic_read_unchecked(&fscache_n_checkaux_update),
71373+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
71374
71375 seq_printf(m, "Pages : mrk=%u unc=%u\n",
71376- atomic_read(&fscache_n_marks),
71377- atomic_read(&fscache_n_uncaches));
71378+ atomic_read_unchecked(&fscache_n_marks),
71379+ atomic_read_unchecked(&fscache_n_uncaches));
71380
71381 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
71382 " oom=%u\n",
71383- atomic_read(&fscache_n_acquires),
71384- atomic_read(&fscache_n_acquires_null),
71385- atomic_read(&fscache_n_acquires_no_cache),
71386- atomic_read(&fscache_n_acquires_ok),
71387- atomic_read(&fscache_n_acquires_nobufs),
71388- atomic_read(&fscache_n_acquires_oom));
71389+ atomic_read_unchecked(&fscache_n_acquires),
71390+ atomic_read_unchecked(&fscache_n_acquires_null),
71391+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
71392+ atomic_read_unchecked(&fscache_n_acquires_ok),
71393+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
71394+ atomic_read_unchecked(&fscache_n_acquires_oom));
71395
71396 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
71397- atomic_read(&fscache_n_object_lookups),
71398- atomic_read(&fscache_n_object_lookups_negative),
71399- atomic_read(&fscache_n_object_lookups_positive),
71400- atomic_read(&fscache_n_object_created),
71401- atomic_read(&fscache_n_object_lookups_timed_out));
71402+ atomic_read_unchecked(&fscache_n_object_lookups),
71403+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
71404+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
71405+ atomic_read_unchecked(&fscache_n_object_created),
71406+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
71407
71408 seq_printf(m, "Invals : n=%u run=%u\n",
71409- atomic_read(&fscache_n_invalidates),
71410- atomic_read(&fscache_n_invalidates_run));
71411+ atomic_read_unchecked(&fscache_n_invalidates),
71412+ atomic_read_unchecked(&fscache_n_invalidates_run));
71413
71414 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
71415- atomic_read(&fscache_n_updates),
71416- atomic_read(&fscache_n_updates_null),
71417- atomic_read(&fscache_n_updates_run));
71418+ atomic_read_unchecked(&fscache_n_updates),
71419+ atomic_read_unchecked(&fscache_n_updates_null),
71420+ atomic_read_unchecked(&fscache_n_updates_run));
71421
71422 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
71423- atomic_read(&fscache_n_relinquishes),
71424- atomic_read(&fscache_n_relinquishes_null),
71425- atomic_read(&fscache_n_relinquishes_waitcrt),
71426- atomic_read(&fscache_n_relinquishes_retire));
71427+ atomic_read_unchecked(&fscache_n_relinquishes),
71428+ atomic_read_unchecked(&fscache_n_relinquishes_null),
71429+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
71430+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
71431
71432 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
71433- atomic_read(&fscache_n_attr_changed),
71434- atomic_read(&fscache_n_attr_changed_ok),
71435- atomic_read(&fscache_n_attr_changed_nobufs),
71436- atomic_read(&fscache_n_attr_changed_nomem),
71437- atomic_read(&fscache_n_attr_changed_calls));
71438+ atomic_read_unchecked(&fscache_n_attr_changed),
71439+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
71440+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
71441+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
71442+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
71443
71444 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
71445- atomic_read(&fscache_n_allocs),
71446- atomic_read(&fscache_n_allocs_ok),
71447- atomic_read(&fscache_n_allocs_wait),
71448- atomic_read(&fscache_n_allocs_nobufs),
71449- atomic_read(&fscache_n_allocs_intr));
71450+ atomic_read_unchecked(&fscache_n_allocs),
71451+ atomic_read_unchecked(&fscache_n_allocs_ok),
71452+ atomic_read_unchecked(&fscache_n_allocs_wait),
71453+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
71454+ atomic_read_unchecked(&fscache_n_allocs_intr));
71455 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
71456- atomic_read(&fscache_n_alloc_ops),
71457- atomic_read(&fscache_n_alloc_op_waits),
71458- atomic_read(&fscache_n_allocs_object_dead));
71459+ atomic_read_unchecked(&fscache_n_alloc_ops),
71460+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
71461+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
71462
71463 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
71464 " int=%u oom=%u\n",
71465- atomic_read(&fscache_n_retrievals),
71466- atomic_read(&fscache_n_retrievals_ok),
71467- atomic_read(&fscache_n_retrievals_wait),
71468- atomic_read(&fscache_n_retrievals_nodata),
71469- atomic_read(&fscache_n_retrievals_nobufs),
71470- atomic_read(&fscache_n_retrievals_intr),
71471- atomic_read(&fscache_n_retrievals_nomem));
71472+ atomic_read_unchecked(&fscache_n_retrievals),
71473+ atomic_read_unchecked(&fscache_n_retrievals_ok),
71474+ atomic_read_unchecked(&fscache_n_retrievals_wait),
71475+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
71476+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
71477+ atomic_read_unchecked(&fscache_n_retrievals_intr),
71478+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
71479 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
71480- atomic_read(&fscache_n_retrieval_ops),
71481- atomic_read(&fscache_n_retrieval_op_waits),
71482- atomic_read(&fscache_n_retrievals_object_dead));
71483+ atomic_read_unchecked(&fscache_n_retrieval_ops),
71484+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
71485+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
71486
71487 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
71488- atomic_read(&fscache_n_stores),
71489- atomic_read(&fscache_n_stores_ok),
71490- atomic_read(&fscache_n_stores_again),
71491- atomic_read(&fscache_n_stores_nobufs),
71492- atomic_read(&fscache_n_stores_oom));
71493+ atomic_read_unchecked(&fscache_n_stores),
71494+ atomic_read_unchecked(&fscache_n_stores_ok),
71495+ atomic_read_unchecked(&fscache_n_stores_again),
71496+ atomic_read_unchecked(&fscache_n_stores_nobufs),
71497+ atomic_read_unchecked(&fscache_n_stores_oom));
71498 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
71499- atomic_read(&fscache_n_store_ops),
71500- atomic_read(&fscache_n_store_calls),
71501- atomic_read(&fscache_n_store_pages),
71502- atomic_read(&fscache_n_store_radix_deletes),
71503- atomic_read(&fscache_n_store_pages_over_limit));
71504+ atomic_read_unchecked(&fscache_n_store_ops),
71505+ atomic_read_unchecked(&fscache_n_store_calls),
71506+ atomic_read_unchecked(&fscache_n_store_pages),
71507+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
71508+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
71509
71510 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
71511- atomic_read(&fscache_n_store_vmscan_not_storing),
71512- atomic_read(&fscache_n_store_vmscan_gone),
71513- atomic_read(&fscache_n_store_vmscan_busy),
71514- atomic_read(&fscache_n_store_vmscan_cancelled),
71515- atomic_read(&fscache_n_store_vmscan_wait));
71516+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
71517+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
71518+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
71519+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
71520+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
71521
71522 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
71523- atomic_read(&fscache_n_op_pend),
71524- atomic_read(&fscache_n_op_run),
71525- atomic_read(&fscache_n_op_enqueue),
71526- atomic_read(&fscache_n_op_cancelled),
71527- atomic_read(&fscache_n_op_rejected));
71528+ atomic_read_unchecked(&fscache_n_op_pend),
71529+ atomic_read_unchecked(&fscache_n_op_run),
71530+ atomic_read_unchecked(&fscache_n_op_enqueue),
71531+ atomic_read_unchecked(&fscache_n_op_cancelled),
71532+ atomic_read_unchecked(&fscache_n_op_rejected));
71533 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
71534- atomic_read(&fscache_n_op_deferred_release),
71535- atomic_read(&fscache_n_op_release),
71536- atomic_read(&fscache_n_op_gc));
71537+ atomic_read_unchecked(&fscache_n_op_deferred_release),
71538+ atomic_read_unchecked(&fscache_n_op_release),
71539+ atomic_read_unchecked(&fscache_n_op_gc));
71540
71541 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
71542 atomic_read(&fscache_n_cop_alloc_object),
71543diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
71544index 28d0c7a..04816b7 100644
71545--- a/fs/fuse/cuse.c
71546+++ b/fs/fuse/cuse.c
71547@@ -611,10 +611,12 @@ static int __init cuse_init(void)
71548 INIT_LIST_HEAD(&cuse_conntbl[i]);
71549
71550 /* inherit and extend fuse_dev_operations */
71551- cuse_channel_fops = fuse_dev_operations;
71552- cuse_channel_fops.owner = THIS_MODULE;
71553- cuse_channel_fops.open = cuse_channel_open;
71554- cuse_channel_fops.release = cuse_channel_release;
71555+ pax_open_kernel();
71556+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
71557+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
71558+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
71559+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
71560+ pax_close_kernel();
71561
71562 cuse_class = class_create(THIS_MODULE, "cuse");
71563 if (IS_ERR(cuse_class))
71564diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
71565index 39706c5..a803c71 100644
71566--- a/fs/fuse/dev.c
71567+++ b/fs/fuse/dev.c
71568@@ -1405,7 +1405,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
71569 ret = 0;
71570 pipe_lock(pipe);
71571
71572- if (!pipe->readers) {
71573+ if (!atomic_read(&pipe->readers)) {
71574 send_sig(SIGPIPE, current, 0);
71575 if (!ret)
71576 ret = -EPIPE;
71577@@ -1434,7 +1434,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
71578 page_nr++;
71579 ret += buf->len;
71580
71581- if (pipe->files)
71582+ if (atomic_read(&pipe->files))
71583 do_wakeup = 1;
71584 }
71585
71586diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
71587index 1545b71..7fabe47 100644
71588--- a/fs/fuse/dir.c
71589+++ b/fs/fuse/dir.c
71590@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
71591 return link;
71592 }
71593
71594-static void free_link(char *link)
71595+static void free_link(const char *link)
71596 {
71597 if (!IS_ERR(link))
71598 free_page((unsigned long) link);
71599diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
71600index f42dffb..4a4c435 100644
71601--- a/fs/gfs2/glock.c
71602+++ b/fs/gfs2/glock.c
71603@@ -385,9 +385,9 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
71604 if (held1 != held2) {
71605 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
71606 if (held2)
71607- gl->gl_lockref.count++;
71608+ __lockref_inc(&gl->gl_lockref);
71609 else
71610- gl->gl_lockref.count--;
71611+ __lockref_dec(&gl->gl_lockref);
71612 }
71613 if (held1 && held2 && list_empty(&gl->gl_holders))
71614 clear_bit(GLF_QUEUED, &gl->gl_flags);
71615@@ -614,9 +614,9 @@ out:
71616 out_sched:
71617 clear_bit(GLF_LOCK, &gl->gl_flags);
71618 smp_mb__after_atomic();
71619- gl->gl_lockref.count++;
71620+ __lockref_inc(&gl->gl_lockref);
71621 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
71622- gl->gl_lockref.count--;
71623+ __lockref_dec(&gl->gl_lockref);
71624 return;
71625
71626 out_unlock:
71627@@ -742,7 +742,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
71628 gl->gl_sbd = sdp;
71629 gl->gl_flags = 0;
71630 gl->gl_name = name;
71631- gl->gl_lockref.count = 1;
71632+ __lockref_set(&gl->gl_lockref, 1);
71633 gl->gl_state = LM_ST_UNLOCKED;
71634 gl->gl_target = LM_ST_UNLOCKED;
71635 gl->gl_demote_state = LM_ST_EXCLUSIVE;
71636@@ -1020,9 +1020,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
71637 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
71638 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
71639 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
71640- gl->gl_lockref.count++;
71641+ __lockref_inc(&gl->gl_lockref);
71642 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
71643- gl->gl_lockref.count--;
71644+ __lockref_dec(&gl->gl_lockref);
71645 }
71646 run_queue(gl, 1);
71647 spin_unlock(&gl->gl_spin);
71648@@ -1325,7 +1325,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
71649 }
71650 }
71651
71652- gl->gl_lockref.count++;
71653+ __lockref_inc(&gl->gl_lockref);
71654 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
71655 spin_unlock(&gl->gl_spin);
71656
71657@@ -1384,12 +1384,12 @@ add_back_to_lru:
71658 goto add_back_to_lru;
71659 }
71660 clear_bit(GLF_LRU, &gl->gl_flags);
71661- gl->gl_lockref.count++;
71662+ __lockref_inc(&gl->gl_lockref);
71663 if (demote_ok(gl))
71664 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
71665 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
71666 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
71667- gl->gl_lockref.count--;
71668+ __lockref_dec(&gl->gl_lockref);
71669 spin_unlock(&gl->gl_spin);
71670 cond_resched_lock(&lru_lock);
71671 }
71672@@ -1719,7 +1719,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
71673 state2str(gl->gl_demote_state), dtime,
71674 atomic_read(&gl->gl_ail_count),
71675 atomic_read(&gl->gl_revokes),
71676- (int)gl->gl_lockref.count, gl->gl_hold_time);
71677+ __lockref_read(&gl->gl_lockref), gl->gl_hold_time);
71678
71679 list_for_each_entry(gh, &gl->gl_holders, gh_list)
71680 dump_holder(seq, gh);
71681diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
71682index fe91951..ce38a6e 100644
71683--- a/fs/gfs2/glops.c
71684+++ b/fs/gfs2/glops.c
71685@@ -544,9 +544,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
71686
71687 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
71688 gl->gl_state == LM_ST_SHARED && ip) {
71689- gl->gl_lockref.count++;
71690+ __lockref_inc(&gl->gl_lockref);
71691 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
71692- gl->gl_lockref.count--;
71693+ __lockref_dec(&gl->gl_lockref);
71694 }
71695 }
71696
71697diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
71698index 3aa17d4..b338075 100644
71699--- a/fs/gfs2/quota.c
71700+++ b/fs/gfs2/quota.c
71701@@ -154,7 +154,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
71702 if (!spin_trylock(&qd->qd_lockref.lock))
71703 return LRU_SKIP;
71704
71705- if (qd->qd_lockref.count == 0) {
71706+ if (__lockref_read(&qd->qd_lockref) == 0) {
71707 lockref_mark_dead(&qd->qd_lockref);
71708 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
71709 }
71710@@ -221,7 +221,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
71711 return NULL;
71712
71713 qd->qd_sbd = sdp;
71714- qd->qd_lockref.count = 1;
71715+ __lockref_set(&qd->qd_lockref, 1);
71716 spin_lock_init(&qd->qd_lockref.lock);
71717 qd->qd_id = qid;
71718 qd->qd_slot = -1;
71719@@ -312,7 +312,7 @@ static void qd_put(struct gfs2_quota_data *qd)
71720 if (lockref_put_or_lock(&qd->qd_lockref))
71721 return;
71722
71723- qd->qd_lockref.count = 0;
71724+ __lockref_set(&qd->qd_lockref, 0);
71725 list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
71726 spin_unlock(&qd->qd_lockref.lock);
71727
71728diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
71729index fd62cae..3494dfa 100644
71730--- a/fs/hostfs/hostfs_kern.c
71731+++ b/fs/hostfs/hostfs_kern.c
71732@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
71733
71734 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
71735 {
71736- char *s = nd_get_link(nd);
71737+ const char *s = nd_get_link(nd);
71738 if (!IS_ERR(s))
71739 __putname(s);
71740 }
71741diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
71742index c274aca..772fa5e 100644
71743--- a/fs/hugetlbfs/inode.c
71744+++ b/fs/hugetlbfs/inode.c
71745@@ -148,6 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
71746 struct mm_struct *mm = current->mm;
71747 struct vm_area_struct *vma;
71748 struct hstate *h = hstate_file(file);
71749+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
71750 struct vm_unmapped_area_info info;
71751
71752 if (len & ~huge_page_mask(h))
71753@@ -161,17 +162,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
71754 return addr;
71755 }
71756
71757+#ifdef CONFIG_PAX_RANDMMAP
71758+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71759+#endif
71760+
71761 if (addr) {
71762 addr = ALIGN(addr, huge_page_size(h));
71763 vma = find_vma(mm, addr);
71764- if (TASK_SIZE - len >= addr &&
71765- (!vma || addr + len <= vma->vm_start))
71766+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
71767 return addr;
71768 }
71769
71770 info.flags = 0;
71771 info.length = len;
71772 info.low_limit = TASK_UNMAPPED_BASE;
71773+
71774+#ifdef CONFIG_PAX_RANDMMAP
71775+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71776+ info.low_limit += mm->delta_mmap;
71777+#endif
71778+
71779 info.high_limit = TASK_SIZE;
71780 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
71781 info.align_offset = 0;
71782@@ -912,7 +922,7 @@ static struct file_system_type hugetlbfs_fs_type = {
71783 };
71784 MODULE_ALIAS_FS("hugetlbfs");
71785
71786-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
71787+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
71788
71789 static int can_do_hugetlb_shm(void)
71790 {
71791diff --git a/fs/inode.c b/fs/inode.c
71792index f00b16f..b653fea 100644
71793--- a/fs/inode.c
71794+++ b/fs/inode.c
71795@@ -830,16 +830,20 @@ unsigned int get_next_ino(void)
71796 unsigned int *p = &get_cpu_var(last_ino);
71797 unsigned int res = *p;
71798
71799+start:
71800+
71801 #ifdef CONFIG_SMP
71802 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
71803- static atomic_t shared_last_ino;
71804- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
71805+ static atomic_unchecked_t shared_last_ino;
71806+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
71807
71808 res = next - LAST_INO_BATCH;
71809 }
71810 #endif
71811
71812- *p = ++res;
71813+ if (unlikely(!++res))
71814+ goto start; /* never zero */
71815+ *p = res;
71816 put_cpu_var(last_ino);
71817 return res;
71818 }
71819diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
71820index 4a6cf28..d3a29d3 100644
71821--- a/fs/jffs2/erase.c
71822+++ b/fs/jffs2/erase.c
71823@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
71824 struct jffs2_unknown_node marker = {
71825 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
71826 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
71827- .totlen = cpu_to_je32(c->cleanmarker_size)
71828+ .totlen = cpu_to_je32(c->cleanmarker_size),
71829+ .hdr_crc = cpu_to_je32(0)
71830 };
71831
71832 jffs2_prealloc_raw_node_refs(c, jeb, 1);
71833diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
71834index 09ed551..45684f8 100644
71835--- a/fs/jffs2/wbuf.c
71836+++ b/fs/jffs2/wbuf.c
71837@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
71838 {
71839 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
71840 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
71841- .totlen = constant_cpu_to_je32(8)
71842+ .totlen = constant_cpu_to_je32(8),
71843+ .hdr_crc = constant_cpu_to_je32(0)
71844 };
71845
71846 /*
71847diff --git a/fs/jfs/super.c b/fs/jfs/super.c
71848index 5d30c56..8c45372 100644
71849--- a/fs/jfs/super.c
71850+++ b/fs/jfs/super.c
71851@@ -901,7 +901,7 @@ static int __init init_jfs_fs(void)
71852
71853 jfs_inode_cachep =
71854 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
71855- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
71856+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
71857 init_once);
71858 if (jfs_inode_cachep == NULL)
71859 return -ENOMEM;
71860diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
71861index 6acc964..eca491f 100644
71862--- a/fs/kernfs/dir.c
71863+++ b/fs/kernfs/dir.c
71864@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
71865 *
71866 * Returns 31 bit hash of ns + name (so it fits in an off_t )
71867 */
71868-static unsigned int kernfs_name_hash(const char *name, const void *ns)
71869+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
71870 {
71871 unsigned long hash = init_name_hash();
71872 unsigned int len = strlen(name);
71873@@ -831,6 +831,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
71874 ret = scops->mkdir(parent, dentry->d_name.name, mode);
71875
71876 kernfs_put_active(parent);
71877+
71878+ if (!ret) {
71879+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
71880+ ret = PTR_ERR_OR_ZERO(dentry_ret);
71881+ }
71882+
71883 return ret;
71884 }
71885
71886diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
71887index 2bacb99..f745182 100644
71888--- a/fs/kernfs/file.c
71889+++ b/fs/kernfs/file.c
71890@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
71891
71892 struct kernfs_open_node {
71893 atomic_t refcnt;
71894- atomic_t event;
71895+ atomic_unchecked_t event;
71896 wait_queue_head_t poll;
71897 struct list_head files; /* goes through kernfs_open_file.list */
71898 };
71899@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
71900 {
71901 struct kernfs_open_file *of = sf->private;
71902
71903- of->event = atomic_read(&of->kn->attr.open->event);
71904+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
71905
71906 return of->kn->attr.ops->seq_show(sf, v);
71907 }
71908@@ -207,7 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
71909 goto out_free;
71910 }
71911
71912- of->event = atomic_read(&of->kn->attr.open->event);
71913+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
71914 ops = kernfs_ops(of->kn);
71915 if (ops->read)
71916 len = ops->read(of, buf, len, *ppos);
71917@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
71918 {
71919 struct kernfs_open_file *of = kernfs_of(file);
71920 const struct kernfs_ops *ops;
71921- size_t len;
71922+ ssize_t len;
71923 char *buf;
71924
71925 if (of->atomic_write_len) {
71926@@ -385,12 +385,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
71927 return ret;
71928 }
71929
71930-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
71931- void *buf, int len, int write)
71932+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
71933+ void *buf, size_t len, int write)
71934 {
71935 struct file *file = vma->vm_file;
71936 struct kernfs_open_file *of = kernfs_of(file);
71937- int ret;
71938+ ssize_t ret;
71939
71940 if (!of->vm_ops)
71941 return -EINVAL;
71942@@ -569,7 +569,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
71943 return -ENOMEM;
71944
71945 atomic_set(&new_on->refcnt, 0);
71946- atomic_set(&new_on->event, 1);
71947+ atomic_set_unchecked(&new_on->event, 1);
71948 init_waitqueue_head(&new_on->poll);
71949 INIT_LIST_HEAD(&new_on->files);
71950 goto retry;
71951@@ -793,7 +793,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
71952
71953 kernfs_put_active(kn);
71954
71955- if (of->event != atomic_read(&on->event))
71956+ if (of->event != atomic_read_unchecked(&on->event))
71957 goto trigger;
71958
71959 return DEFAULT_POLLMASK;
71960@@ -824,7 +824,7 @@ repeat:
71961
71962 on = kn->attr.open;
71963 if (on) {
71964- atomic_inc(&on->event);
71965+ atomic_inc_unchecked(&on->event);
71966 wake_up_interruptible(&on->poll);
71967 }
71968
71969diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
71970index 8a19889..4c3069a 100644
71971--- a/fs/kernfs/symlink.c
71972+++ b/fs/kernfs/symlink.c
71973@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
71974 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
71975 void *cookie)
71976 {
71977- char *page = nd_get_link(nd);
71978+ const char *page = nd_get_link(nd);
71979 if (!IS_ERR(page))
71980 free_page((unsigned long)page);
71981 }
71982diff --git a/fs/libfs.c b/fs/libfs.c
71983index 0ab6512..cd9982d 100644
71984--- a/fs/libfs.c
71985+++ b/fs/libfs.c
71986@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
71987
71988 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
71989 struct dentry *next = list_entry(p, struct dentry, d_child);
71990+ char d_name[sizeof(next->d_iname)];
71991+ const unsigned char *name;
71992+
71993 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
71994 if (!simple_positive(next)) {
71995 spin_unlock(&next->d_lock);
71996@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
71997
71998 spin_unlock(&next->d_lock);
71999 spin_unlock(&dentry->d_lock);
72000- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
72001+ name = next->d_name.name;
72002+ if (name == next->d_iname) {
72003+ memcpy(d_name, name, next->d_name.len);
72004+ name = d_name;
72005+ }
72006+ if (!dir_emit(ctx, name, next->d_name.len,
72007 next->d_inode->i_ino, dt_type(next->d_inode)))
72008 return 0;
72009 spin_lock(&dentry->d_lock);
72010@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
72011 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
72012 void *cookie)
72013 {
72014- char *s = nd_get_link(nd);
72015+ const char *s = nd_get_link(nd);
72016 if (!IS_ERR(s))
72017 kfree(s);
72018 }
72019diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
72020index acd3947..1f896e2 100644
72021--- a/fs/lockd/clntproc.c
72022+++ b/fs/lockd/clntproc.c
72023@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
72024 /*
72025 * Cookie counter for NLM requests
72026 */
72027-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
72028+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
72029
72030 void nlmclnt_next_cookie(struct nlm_cookie *c)
72031 {
72032- u32 cookie = atomic_inc_return(&nlm_cookie);
72033+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
72034
72035 memcpy(c->data, &cookie, 4);
72036 c->len=4;
72037diff --git a/fs/mount.h b/fs/mount.h
72038index 6a61c2b..bd79179 100644
72039--- a/fs/mount.h
72040+++ b/fs/mount.h
72041@@ -13,7 +13,7 @@ struct mnt_namespace {
72042 u64 seq; /* Sequence number to prevent loops */
72043 wait_queue_head_t poll;
72044 u64 event;
72045-};
72046+} __randomize_layout;
72047
72048 struct mnt_pcp {
72049 int mnt_count;
72050@@ -65,7 +65,7 @@ struct mount {
72051 struct hlist_head mnt_pins;
72052 struct fs_pin mnt_umount;
72053 struct dentry *mnt_ex_mountpoint;
72054-};
72055+} __randomize_layout;
72056
72057 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
72058
72059diff --git a/fs/namei.c b/fs/namei.c
72060index 50a8583..44c470a 100644
72061--- a/fs/namei.c
72062+++ b/fs/namei.c
72063@@ -337,17 +337,32 @@ int generic_permission(struct inode *inode, int mask)
72064 if (ret != -EACCES)
72065 return ret;
72066
72067+#ifdef CONFIG_GRKERNSEC
72068+ /* we'll block if we have to log due to a denied capability use */
72069+ if (mask & MAY_NOT_BLOCK)
72070+ return -ECHILD;
72071+#endif
72072+
72073 if (S_ISDIR(inode->i_mode)) {
72074 /* DACs are overridable for directories */
72075- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
72076- return 0;
72077 if (!(mask & MAY_WRITE))
72078- if (capable_wrt_inode_uidgid(inode,
72079- CAP_DAC_READ_SEARCH))
72080+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
72081+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
72082 return 0;
72083+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
72084+ return 0;
72085 return -EACCES;
72086 }
72087 /*
72088+ * Searching includes executable on directories, else just read.
72089+ */
72090+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
72091+ if (mask == MAY_READ)
72092+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
72093+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
72094+ return 0;
72095+
72096+ /*
72097 * Read/write DACs are always overridable.
72098 * Executable DACs are overridable when there is
72099 * at least one exec bit set.
72100@@ -356,14 +371,6 @@ int generic_permission(struct inode *inode, int mask)
72101 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
72102 return 0;
72103
72104- /*
72105- * Searching includes executable on directories, else just read.
72106- */
72107- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
72108- if (mask == MAY_READ)
72109- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
72110- return 0;
72111-
72112 return -EACCES;
72113 }
72114 EXPORT_SYMBOL(generic_permission);
72115@@ -503,7 +510,7 @@ struct nameidata {
72116 int last_type;
72117 unsigned depth;
72118 struct file *base;
72119- char *saved_names[MAX_NESTED_LINKS + 1];
72120+ const char *saved_names[MAX_NESTED_LINKS + 1];
72121 };
72122
72123 /*
72124@@ -714,13 +721,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
72125 nd->flags |= LOOKUP_JUMPED;
72126 }
72127
72128-void nd_set_link(struct nameidata *nd, char *path)
72129+void nd_set_link(struct nameidata *nd, const char *path)
72130 {
72131 nd->saved_names[nd->depth] = path;
72132 }
72133 EXPORT_SYMBOL(nd_set_link);
72134
72135-char *nd_get_link(struct nameidata *nd)
72136+const char *nd_get_link(const struct nameidata *nd)
72137 {
72138 return nd->saved_names[nd->depth];
72139 }
72140@@ -855,7 +862,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
72141 {
72142 struct dentry *dentry = link->dentry;
72143 int error;
72144- char *s;
72145+ const char *s;
72146
72147 BUG_ON(nd->flags & LOOKUP_RCU);
72148
72149@@ -876,6 +883,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
72150 if (error)
72151 goto out_put_nd_path;
72152
72153+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
72154+ dentry->d_inode, dentry, nd->path.mnt)) {
72155+ error = -EACCES;
72156+ goto out_put_nd_path;
72157+ }
72158+
72159 nd->last_type = LAST_BIND;
72160 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
72161 error = PTR_ERR(*p);
72162@@ -1640,6 +1653,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
72163 if (res)
72164 break;
72165 res = walk_component(nd, path, LOOKUP_FOLLOW);
72166+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
72167+ res = -EACCES;
72168 put_link(nd, &link, cookie);
72169 } while (res > 0);
72170
72171@@ -1712,7 +1727,7 @@ EXPORT_SYMBOL(full_name_hash);
72172 static inline u64 hash_name(const char *name)
72173 {
72174 unsigned long a, b, adata, bdata, mask, hash, len;
72175- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
72176+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
72177
72178 hash = a = 0;
72179 len = -sizeof(unsigned long);
72180@@ -2007,6 +2022,8 @@ static int path_lookupat(int dfd, const char *name,
72181 if (err)
72182 break;
72183 err = lookup_last(nd, &path);
72184+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
72185+ err = -EACCES;
72186 put_link(nd, &link, cookie);
72187 }
72188 }
72189@@ -2014,6 +2031,13 @@ static int path_lookupat(int dfd, const char *name,
72190 if (!err)
72191 err = complete_walk(nd);
72192
72193+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
72194+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
72195+ path_put(&nd->path);
72196+ err = -ENOENT;
72197+ }
72198+ }
72199+
72200 if (!err && nd->flags & LOOKUP_DIRECTORY) {
72201 if (!d_can_lookup(nd->path.dentry)) {
72202 path_put(&nd->path);
72203@@ -2035,8 +2059,15 @@ static int filename_lookup(int dfd, struct filename *name,
72204 retval = path_lookupat(dfd, name->name,
72205 flags | LOOKUP_REVAL, nd);
72206
72207- if (likely(!retval))
72208+ if (likely(!retval)) {
72209 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
72210+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
72211+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
72212+ path_put(&nd->path);
72213+ return -ENOENT;
72214+ }
72215+ }
72216+ }
72217 return retval;
72218 }
72219
72220@@ -2615,6 +2646,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
72221 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
72222 return -EPERM;
72223
72224+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
72225+ return -EPERM;
72226+ if (gr_handle_rawio(inode))
72227+ return -EPERM;
72228+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
72229+ return -EACCES;
72230+
72231 return 0;
72232 }
72233
72234@@ -2846,7 +2884,7 @@ looked_up:
72235 * cleared otherwise prior to returning.
72236 */
72237 static int lookup_open(struct nameidata *nd, struct path *path,
72238- struct file *file,
72239+ struct path *link, struct file *file,
72240 const struct open_flags *op,
72241 bool got_write, int *opened)
72242 {
72243@@ -2881,6 +2919,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
72244 /* Negative dentry, just create the file */
72245 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
72246 umode_t mode = op->mode;
72247+
72248+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
72249+ error = -EACCES;
72250+ goto out_dput;
72251+ }
72252+
72253+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
72254+ error = -EACCES;
72255+ goto out_dput;
72256+ }
72257+
72258 if (!IS_POSIXACL(dir->d_inode))
72259 mode &= ~current_umask();
72260 /*
72261@@ -2902,6 +2951,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
72262 nd->flags & LOOKUP_EXCL);
72263 if (error)
72264 goto out_dput;
72265+ else
72266+ gr_handle_create(dentry, nd->path.mnt);
72267 }
72268 out_no_open:
72269 path->dentry = dentry;
72270@@ -2916,7 +2967,7 @@ out_dput:
72271 /*
72272 * Handle the last step of open()
72273 */
72274-static int do_last(struct nameidata *nd, struct path *path,
72275+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
72276 struct file *file, const struct open_flags *op,
72277 int *opened, struct filename *name)
72278 {
72279@@ -2966,6 +3017,15 @@ static int do_last(struct nameidata *nd, struct path *path,
72280 if (error)
72281 return error;
72282
72283+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
72284+ error = -ENOENT;
72285+ goto out;
72286+ }
72287+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
72288+ error = -EACCES;
72289+ goto out;
72290+ }
72291+
72292 audit_inode(name, dir, LOOKUP_PARENT);
72293 error = -EISDIR;
72294 /* trailing slashes? */
72295@@ -2985,7 +3045,7 @@ retry_lookup:
72296 */
72297 }
72298 mutex_lock(&dir->d_inode->i_mutex);
72299- error = lookup_open(nd, path, file, op, got_write, opened);
72300+ error = lookup_open(nd, path, link, file, op, got_write, opened);
72301 mutex_unlock(&dir->d_inode->i_mutex);
72302
72303 if (error <= 0) {
72304@@ -3009,11 +3069,28 @@ retry_lookup:
72305 goto finish_open_created;
72306 }
72307
72308+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
72309+ error = -ENOENT;
72310+ goto exit_dput;
72311+ }
72312+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
72313+ error = -EACCES;
72314+ goto exit_dput;
72315+ }
72316+
72317 /*
72318 * create/update audit record if it already exists.
72319 */
72320- if (d_is_positive(path->dentry))
72321+ if (d_is_positive(path->dentry)) {
72322+ /* only check if O_CREAT is specified, all other checks need to go
72323+ into may_open */
72324+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
72325+ error = -EACCES;
72326+ goto exit_dput;
72327+ }
72328+
72329 audit_inode(name, path->dentry, 0);
72330+ }
72331
72332 /*
72333 * If atomic_open() acquired write access it is dropped now due to
72334@@ -3055,6 +3132,11 @@ finish_lookup:
72335 }
72336 }
72337 BUG_ON(inode != path->dentry->d_inode);
72338+ /* if we're resolving a symlink to another symlink */
72339+ if (link && gr_handle_symlink_owner(link, inode)) {
72340+ error = -EACCES;
72341+ goto out;
72342+ }
72343 return 1;
72344 }
72345
72346@@ -3074,7 +3156,18 @@ finish_open:
72347 path_put(&save_parent);
72348 return error;
72349 }
72350+
72351+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
72352+ error = -ENOENT;
72353+ goto out;
72354+ }
72355+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
72356+ error = -EACCES;
72357+ goto out;
72358+ }
72359+
72360 audit_inode(name, nd->path.dentry, 0);
72361+
72362 error = -EISDIR;
72363 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
72364 goto out;
72365@@ -3235,7 +3328,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
72366 if (unlikely(error))
72367 goto out;
72368
72369- error = do_last(nd, &path, file, op, &opened, pathname);
72370+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
72371 while (unlikely(error > 0)) { /* trailing symlink */
72372 struct path link = path;
72373 void *cookie;
72374@@ -3253,7 +3346,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
72375 error = follow_link(&link, nd, &cookie);
72376 if (unlikely(error))
72377 break;
72378- error = do_last(nd, &path, file, op, &opened, pathname);
72379+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
72380 put_link(nd, &link, cookie);
72381 }
72382 out:
72383@@ -3356,9 +3449,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
72384 goto unlock;
72385
72386 error = -EEXIST;
72387- if (d_is_positive(dentry))
72388+ if (d_is_positive(dentry)) {
72389+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
72390+ error = -ENOENT;
72391 goto fail;
72392-
72393+ }
72394 /*
72395 * Special case - lookup gave negative, but... we had foo/bar/
72396 * From the vfs_mknod() POV we just have a negative dentry -
72397@@ -3423,6 +3518,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
72398 }
72399 EXPORT_SYMBOL(user_path_create);
72400
72401+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
72402+{
72403+ struct filename *tmp = getname(pathname);
72404+ struct dentry *res;
72405+ if (IS_ERR(tmp))
72406+ return ERR_CAST(tmp);
72407+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
72408+ if (IS_ERR(res))
72409+ putname(tmp);
72410+ else
72411+ *to = tmp;
72412+ return res;
72413+}
72414+
72415 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
72416 {
72417 int error = may_create(dir, dentry);
72418@@ -3486,6 +3595,17 @@ retry:
72419
72420 if (!IS_POSIXACL(path.dentry->d_inode))
72421 mode &= ~current_umask();
72422+
72423+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
72424+ error = -EPERM;
72425+ goto out;
72426+ }
72427+
72428+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
72429+ error = -EACCES;
72430+ goto out;
72431+ }
72432+
72433 error = security_path_mknod(&path, dentry, mode, dev);
72434 if (error)
72435 goto out;
72436@@ -3501,6 +3621,8 @@ retry:
72437 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
72438 break;
72439 }
72440+ if (!error)
72441+ gr_handle_create(dentry, path.mnt);
72442 out:
72443 done_path_create(&path, dentry);
72444 if (retry_estale(error, lookup_flags)) {
72445@@ -3555,9 +3677,16 @@ retry:
72446
72447 if (!IS_POSIXACL(path.dentry->d_inode))
72448 mode &= ~current_umask();
72449+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
72450+ error = -EACCES;
72451+ goto out;
72452+ }
72453 error = security_path_mkdir(&path, dentry, mode);
72454 if (!error)
72455 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
72456+ if (!error)
72457+ gr_handle_create(dentry, path.mnt);
72458+out:
72459 done_path_create(&path, dentry);
72460 if (retry_estale(error, lookup_flags)) {
72461 lookup_flags |= LOOKUP_REVAL;
72462@@ -3590,7 +3719,7 @@ void dentry_unhash(struct dentry *dentry)
72463 {
72464 shrink_dcache_parent(dentry);
72465 spin_lock(&dentry->d_lock);
72466- if (dentry->d_lockref.count == 1)
72467+ if (__lockref_read(&dentry->d_lockref) == 1)
72468 __d_drop(dentry);
72469 spin_unlock(&dentry->d_lock);
72470 }
72471@@ -3641,6 +3770,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
72472 struct filename *name;
72473 struct dentry *dentry;
72474 struct nameidata nd;
72475+ u64 saved_ino = 0;
72476+ dev_t saved_dev = 0;
72477 unsigned int lookup_flags = 0;
72478 retry:
72479 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
72480@@ -3673,10 +3804,21 @@ retry:
72481 error = -ENOENT;
72482 goto exit3;
72483 }
72484+
72485+ saved_ino = gr_get_ino_from_dentry(dentry);
72486+ saved_dev = gr_get_dev_from_dentry(dentry);
72487+
72488+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
72489+ error = -EACCES;
72490+ goto exit3;
72491+ }
72492+
72493 error = security_path_rmdir(&nd.path, dentry);
72494 if (error)
72495 goto exit3;
72496 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
72497+ if (!error && (saved_dev || saved_ino))
72498+ gr_handle_delete(saved_ino, saved_dev);
72499 exit3:
72500 dput(dentry);
72501 exit2:
72502@@ -3769,6 +3911,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
72503 struct nameidata nd;
72504 struct inode *inode = NULL;
72505 struct inode *delegated_inode = NULL;
72506+ u64 saved_ino = 0;
72507+ dev_t saved_dev = 0;
72508 unsigned int lookup_flags = 0;
72509 retry:
72510 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
72511@@ -3795,10 +3939,22 @@ retry_deleg:
72512 if (d_is_negative(dentry))
72513 goto slashes;
72514 ihold(inode);
72515+
72516+ if (inode->i_nlink <= 1) {
72517+ saved_ino = gr_get_ino_from_dentry(dentry);
72518+ saved_dev = gr_get_dev_from_dentry(dentry);
72519+ }
72520+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
72521+ error = -EACCES;
72522+ goto exit2;
72523+ }
72524+
72525 error = security_path_unlink(&nd.path, dentry);
72526 if (error)
72527 goto exit2;
72528 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
72529+ if (!error && (saved_ino || saved_dev))
72530+ gr_handle_delete(saved_ino, saved_dev);
72531 exit2:
72532 dput(dentry);
72533 }
72534@@ -3887,9 +4043,17 @@ retry:
72535 if (IS_ERR(dentry))
72536 goto out_putname;
72537
72538+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
72539+ error = -EACCES;
72540+ goto out;
72541+ }
72542+
72543 error = security_path_symlink(&path, dentry, from->name);
72544 if (!error)
72545 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
72546+ if (!error)
72547+ gr_handle_create(dentry, path.mnt);
72548+out:
72549 done_path_create(&path, dentry);
72550 if (retry_estale(error, lookup_flags)) {
72551 lookup_flags |= LOOKUP_REVAL;
72552@@ -3993,6 +4157,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
72553 struct dentry *new_dentry;
72554 struct path old_path, new_path;
72555 struct inode *delegated_inode = NULL;
72556+ struct filename *to = NULL;
72557 int how = 0;
72558 int error;
72559
72560@@ -4016,7 +4181,7 @@ retry:
72561 if (error)
72562 return error;
72563
72564- new_dentry = user_path_create(newdfd, newname, &new_path,
72565+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
72566 (how & LOOKUP_REVAL));
72567 error = PTR_ERR(new_dentry);
72568 if (IS_ERR(new_dentry))
72569@@ -4028,11 +4193,28 @@ retry:
72570 error = may_linkat(&old_path);
72571 if (unlikely(error))
72572 goto out_dput;
72573+
72574+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
72575+ old_path.dentry->d_inode,
72576+ old_path.dentry->d_inode->i_mode, to)) {
72577+ error = -EACCES;
72578+ goto out_dput;
72579+ }
72580+
72581+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
72582+ old_path.dentry, old_path.mnt, to)) {
72583+ error = -EACCES;
72584+ goto out_dput;
72585+ }
72586+
72587 error = security_path_link(old_path.dentry, &new_path, new_dentry);
72588 if (error)
72589 goto out_dput;
72590 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
72591+ if (!error)
72592+ gr_handle_create(new_dentry, new_path.mnt);
72593 out_dput:
72594+ putname(to);
72595 done_path_create(&new_path, new_dentry);
72596 if (delegated_inode) {
72597 error = break_deleg_wait(&delegated_inode);
72598@@ -4348,6 +4530,20 @@ retry_deleg:
72599 if (new_dentry == trap)
72600 goto exit5;
72601
72602+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
72603+ /* use EXDEV error to cause 'mv' to switch to an alternative
72604+ * method for usability
72605+ */
72606+ error = -EXDEV;
72607+ goto exit5;
72608+ }
72609+
72610+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
72611+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
72612+ to, flags);
72613+ if (error)
72614+ goto exit5;
72615+
72616 error = security_path_rename(&oldnd.path, old_dentry,
72617 &newnd.path, new_dentry, flags);
72618 if (error)
72619@@ -4355,6 +4551,9 @@ retry_deleg:
72620 error = vfs_rename(old_dir->d_inode, old_dentry,
72621 new_dir->d_inode, new_dentry,
72622 &delegated_inode, flags);
72623+ if (!error)
72624+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
72625+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
72626 exit5:
72627 dput(new_dentry);
72628 exit4:
72629@@ -4411,14 +4610,24 @@ EXPORT_SYMBOL(vfs_whiteout);
72630
72631 int readlink_copy(char __user *buffer, int buflen, const char *link)
72632 {
72633+ char tmpbuf[64];
72634+ const char *newlink;
72635 int len = PTR_ERR(link);
72636+
72637 if (IS_ERR(link))
72638 goto out;
72639
72640 len = strlen(link);
72641 if (len > (unsigned) buflen)
72642 len = buflen;
72643- if (copy_to_user(buffer, link, len))
72644+
72645+ if (len < sizeof(tmpbuf)) {
72646+ memcpy(tmpbuf, link, len);
72647+ newlink = tmpbuf;
72648+ } else
72649+ newlink = link;
72650+
72651+ if (copy_to_user(buffer, newlink, len))
72652 len = -EFAULT;
72653 out:
72654 return len;
72655diff --git a/fs/namespace.c b/fs/namespace.c
72656index 38ed1e1..8500e56 100644
72657--- a/fs/namespace.c
72658+++ b/fs/namespace.c
72659@@ -1480,6 +1480,9 @@ static int do_umount(struct mount *mnt, int flags)
72660 if (!(sb->s_flags & MS_RDONLY))
72661 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
72662 up_write(&sb->s_umount);
72663+
72664+ gr_log_remount(mnt->mnt_devname, retval);
72665+
72666 return retval;
72667 }
72668
72669@@ -1502,6 +1505,9 @@ static int do_umount(struct mount *mnt, int flags)
72670 }
72671 unlock_mount_hash();
72672 namespace_unlock();
72673+
72674+ gr_log_unmount(mnt->mnt_devname, retval);
72675+
72676 return retval;
72677 }
72678
72679@@ -1559,7 +1565,7 @@ static inline bool may_mount(void)
72680 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
72681 */
72682
72683-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
72684+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
72685 {
72686 struct path path;
72687 struct mount *mnt;
72688@@ -1604,7 +1610,7 @@ out:
72689 /*
72690 * The 2.0 compatible umount. No flags.
72691 */
72692-SYSCALL_DEFINE1(oldumount, char __user *, name)
72693+SYSCALL_DEFINE1(oldumount, const char __user *, name)
72694 {
72695 return sys_umount(name, 0);
72696 }
72697@@ -2670,6 +2676,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
72698 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
72699 MS_STRICTATIME);
72700
72701+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
72702+ retval = -EPERM;
72703+ goto dput_out;
72704+ }
72705+
72706+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
72707+ retval = -EPERM;
72708+ goto dput_out;
72709+ }
72710+
72711 if (flags & MS_REMOUNT)
72712 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
72713 data_page);
72714@@ -2683,7 +2699,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
72715 retval = do_new_mount(&path, type_page, flags, mnt_flags,
72716 dev_name, data_page);
72717 dput_out:
72718+ gr_log_mount(dev_name, &path, retval);
72719+
72720 path_put(&path);
72721+
72722 return retval;
72723 }
72724
72725@@ -2701,7 +2720,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
72726 * number incrementing at 10Ghz will take 12,427 years to wrap which
72727 * is effectively never, so we can ignore the possibility.
72728 */
72729-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
72730+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
72731
72732 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
72733 {
72734@@ -2717,7 +2736,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
72735 return ERR_PTR(ret);
72736 }
72737 new_ns->ns.ops = &mntns_operations;
72738- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
72739+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
72740 atomic_set(&new_ns->count, 1);
72741 new_ns->root = NULL;
72742 INIT_LIST_HEAD(&new_ns->list);
72743@@ -2727,7 +2746,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
72744 return new_ns;
72745 }
72746
72747-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
72748+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
72749 struct user_namespace *user_ns, struct fs_struct *new_fs)
72750 {
72751 struct mnt_namespace *new_ns;
72752@@ -2848,8 +2867,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
72753 }
72754 EXPORT_SYMBOL(mount_subtree);
72755
72756-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
72757- char __user *, type, unsigned long, flags, void __user *, data)
72758+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
72759+ const char __user *, type, unsigned long, flags, void __user *, data)
72760 {
72761 int ret;
72762 char *kernel_type;
72763@@ -2955,6 +2974,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
72764 if (error)
72765 goto out2;
72766
72767+ if (gr_handle_chroot_pivot()) {
72768+ error = -EPERM;
72769+ goto out2;
72770+ }
72771+
72772 get_fs_root(current->fs, &root);
72773 old_mp = lock_mount(&old);
72774 error = PTR_ERR(old_mp);
72775@@ -3235,7 +3259,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
72776 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
72777 return -EPERM;
72778
72779- if (fs->users != 1)
72780+ if (atomic_read(&fs->users) != 1)
72781 return -EINVAL;
72782
72783 get_mnt_ns(mnt_ns);
72784diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
72785index 19ca95c..b28702c 100644
72786--- a/fs/nfs/callback_xdr.c
72787+++ b/fs/nfs/callback_xdr.c
72788@@ -51,7 +51,7 @@ struct callback_op {
72789 callback_decode_arg_t decode_args;
72790 callback_encode_res_t encode_res;
72791 long res_maxsize;
72792-};
72793+} __do_const;
72794
72795 static struct callback_op callback_ops[];
72796
72797diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
72798index d42dff6..ecbdf42 100644
72799--- a/fs/nfs/inode.c
72800+++ b/fs/nfs/inode.c
72801@@ -1270,16 +1270,16 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
72802 return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
72803 }
72804
72805-static atomic_long_t nfs_attr_generation_counter;
72806+static atomic_long_unchecked_t nfs_attr_generation_counter;
72807
72808 static unsigned long nfs_read_attr_generation_counter(void)
72809 {
72810- return atomic_long_read(&nfs_attr_generation_counter);
72811+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
72812 }
72813
72814 unsigned long nfs_inc_attr_generation_counter(void)
72815 {
72816- return atomic_long_inc_return(&nfs_attr_generation_counter);
72817+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
72818 }
72819 EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
72820
72821diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
72822index 5416968..0942042 100644
72823--- a/fs/nfsd/nfs4proc.c
72824+++ b/fs/nfsd/nfs4proc.c
72825@@ -1496,7 +1496,7 @@ struct nfsd4_operation {
72826 nfsd4op_rsize op_rsize_bop;
72827 stateid_getter op_get_currentstateid;
72828 stateid_setter op_set_currentstateid;
72829-};
72830+} __do_const;
72831
72832 static struct nfsd4_operation nfsd4_ops[];
72833
72834diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
72835index 5b33ce1..c2a92aa 100644
72836--- a/fs/nfsd/nfs4xdr.c
72837+++ b/fs/nfsd/nfs4xdr.c
72838@@ -1703,7 +1703,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
72839
72840 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
72841
72842-static nfsd4_dec nfsd4_dec_ops[] = {
72843+static const nfsd4_dec nfsd4_dec_ops[] = {
72844 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
72845 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
72846 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
72847diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
72848index 46ec934..f384e41 100644
72849--- a/fs/nfsd/nfscache.c
72850+++ b/fs/nfsd/nfscache.c
72851@@ -541,7 +541,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
72852 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
72853 u32 hash;
72854 struct nfsd_drc_bucket *b;
72855- int len;
72856+ long len;
72857 size_t bufsize = 0;
72858
72859 if (!rp)
72860@@ -550,11 +550,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
72861 hash = nfsd_cache_hash(rp->c_xid);
72862 b = &drc_hashtbl[hash];
72863
72864- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
72865- len >>= 2;
72866+ if (statp) {
72867+ len = (char*)statp - (char*)resv->iov_base;
72868+ len = resv->iov_len - len;
72869+ len >>= 2;
72870+ }
72871
72872 /* Don't cache excessive amounts of data and XDR failures */
72873- if (!statp || len > (256 >> 2)) {
72874+ if (!statp || len > (256 >> 2) || len < 0) {
72875 nfsd_reply_cache_free(b, rp);
72876 return;
72877 }
72878@@ -562,7 +565,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
72879 switch (cachetype) {
72880 case RC_REPLSTAT:
72881 if (len != 1)
72882- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
72883+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
72884 rp->c_replstat = *statp;
72885 break;
72886 case RC_REPLBUFF:
72887diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
72888index 3685265..e77261e 100644
72889--- a/fs/nfsd/vfs.c
72890+++ b/fs/nfsd/vfs.c
72891@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
72892
72893 oldfs = get_fs();
72894 set_fs(KERNEL_DS);
72895- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
72896+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
72897 set_fs(oldfs);
72898 return nfsd_finish_read(file, count, host_err);
72899 }
72900@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
72901
72902 /* Write the data. */
72903 oldfs = get_fs(); set_fs(KERNEL_DS);
72904- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
72905+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
72906 set_fs(oldfs);
72907 if (host_err < 0)
72908 goto out_nfserr;
72909@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
72910 */
72911
72912 oldfs = get_fs(); set_fs(KERNEL_DS);
72913- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
72914+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
72915 set_fs(oldfs);
72916
72917 if (host_err < 0)
72918diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
72919index 52ccd34..7a6b202 100644
72920--- a/fs/nls/nls_base.c
72921+++ b/fs/nls/nls_base.c
72922@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
72923
72924 int __register_nls(struct nls_table *nls, struct module *owner)
72925 {
72926- struct nls_table ** tmp = &tables;
72927+ struct nls_table *tmp = tables;
72928
72929 if (nls->next)
72930 return -EBUSY;
72931
72932- nls->owner = owner;
72933+ pax_open_kernel();
72934+ *(void **)&nls->owner = owner;
72935+ pax_close_kernel();
72936 spin_lock(&nls_lock);
72937- while (*tmp) {
72938- if (nls == *tmp) {
72939+ while (tmp) {
72940+ if (nls == tmp) {
72941 spin_unlock(&nls_lock);
72942 return -EBUSY;
72943 }
72944- tmp = &(*tmp)->next;
72945+ tmp = tmp->next;
72946 }
72947- nls->next = tables;
72948+ pax_open_kernel();
72949+ *(struct nls_table **)&nls->next = tables;
72950+ pax_close_kernel();
72951 tables = nls;
72952 spin_unlock(&nls_lock);
72953 return 0;
72954@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
72955
72956 int unregister_nls(struct nls_table * nls)
72957 {
72958- struct nls_table ** tmp = &tables;
72959+ struct nls_table * const * tmp = &tables;
72960
72961 spin_lock(&nls_lock);
72962 while (*tmp) {
72963 if (nls == *tmp) {
72964- *tmp = nls->next;
72965+ pax_open_kernel();
72966+ *(struct nls_table **)tmp = nls->next;
72967+ pax_close_kernel();
72968 spin_unlock(&nls_lock);
72969 return 0;
72970 }
72971@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
72972 return -EINVAL;
72973 }
72974
72975-static struct nls_table *find_nls(char *charset)
72976+static struct nls_table *find_nls(const char *charset)
72977 {
72978 struct nls_table *nls;
72979 spin_lock(&nls_lock);
72980@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
72981 return nls;
72982 }
72983
72984-struct nls_table *load_nls(char *charset)
72985+struct nls_table *load_nls(const char *charset)
72986 {
72987 return try_then_request_module(find_nls(charset), "nls_%s", charset);
72988 }
72989diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
72990index 162b3f1..6076a7c 100644
72991--- a/fs/nls/nls_euc-jp.c
72992+++ b/fs/nls/nls_euc-jp.c
72993@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
72994 p_nls = load_nls("cp932");
72995
72996 if (p_nls) {
72997- table.charset2upper = p_nls->charset2upper;
72998- table.charset2lower = p_nls->charset2lower;
72999+ pax_open_kernel();
73000+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
73001+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
73002+ pax_close_kernel();
73003 return register_nls(&table);
73004 }
73005
73006diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
73007index a80a741..7b96e1b 100644
73008--- a/fs/nls/nls_koi8-ru.c
73009+++ b/fs/nls/nls_koi8-ru.c
73010@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
73011 p_nls = load_nls("koi8-u");
73012
73013 if (p_nls) {
73014- table.charset2upper = p_nls->charset2upper;
73015- table.charset2lower = p_nls->charset2lower;
73016+ pax_open_kernel();
73017+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
73018+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
73019+ pax_close_kernel();
73020 return register_nls(&table);
73021 }
73022
73023diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
73024index cf27550..6c70f29d 100644
73025--- a/fs/notify/fanotify/fanotify_user.c
73026+++ b/fs/notify/fanotify/fanotify_user.c
73027@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
73028
73029 fd = fanotify_event_metadata.fd;
73030 ret = -EFAULT;
73031- if (copy_to_user(buf, &fanotify_event_metadata,
73032- fanotify_event_metadata.event_len))
73033+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
73034+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
73035 goto out_close_fd;
73036
73037 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
73038diff --git a/fs/notify/notification.c b/fs/notify/notification.c
73039index a95d8e0..a91a5fd 100644
73040--- a/fs/notify/notification.c
73041+++ b/fs/notify/notification.c
73042@@ -48,7 +48,7 @@
73043 #include <linux/fsnotify_backend.h>
73044 #include "fsnotify.h"
73045
73046-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
73047+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
73048
73049 /**
73050 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
73051@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
73052 */
73053 u32 fsnotify_get_cookie(void)
73054 {
73055- return atomic_inc_return(&fsnotify_sync_cookie);
73056+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
73057 }
73058 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
73059
73060diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
73061index 9e38daf..5727cae 100644
73062--- a/fs/ntfs/dir.c
73063+++ b/fs/ntfs/dir.c
73064@@ -1310,7 +1310,7 @@ find_next_index_buffer:
73065 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
73066 ~(s64)(ndir->itype.index.block_size - 1)));
73067 /* Bounds checks. */
73068- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
73069+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
73070 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
73071 "inode 0x%lx or driver bug.", vdir->i_ino);
73072 goto err_out;
73073diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
73074index 1da9b2d..9cca092a 100644
73075--- a/fs/ntfs/file.c
73076+++ b/fs/ntfs/file.c
73077@@ -1281,7 +1281,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
73078 char *addr;
73079 size_t total = 0;
73080 unsigned len;
73081- int left;
73082+ unsigned left;
73083
73084 do {
73085 len = PAGE_CACHE_SIZE - ofs;
73086diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
73087index 9e1e112..241a52a 100644
73088--- a/fs/ntfs/super.c
73089+++ b/fs/ntfs/super.c
73090@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
73091 if (!silent)
73092 ntfs_error(sb, "Primary boot sector is invalid.");
73093 } else if (!silent)
73094- ntfs_error(sb, read_err_str, "primary");
73095+ ntfs_error(sb, read_err_str, "%s", "primary");
73096 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
73097 if (bh_primary)
73098 brelse(bh_primary);
73099@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
73100 goto hotfix_primary_boot_sector;
73101 brelse(bh_backup);
73102 } else if (!silent)
73103- ntfs_error(sb, read_err_str, "backup");
73104+ ntfs_error(sb, read_err_str, "%s", "backup");
73105 /* Try to read NT3.51- backup boot sector. */
73106 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
73107 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
73108@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
73109 "sector.");
73110 brelse(bh_backup);
73111 } else if (!silent)
73112- ntfs_error(sb, read_err_str, "backup");
73113+ ntfs_error(sb, read_err_str, "%s", "backup");
73114 /* We failed. Cleanup and return. */
73115 if (bh_primary)
73116 brelse(bh_primary);
73117diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
73118index 0440134..d52c93a 100644
73119--- a/fs/ocfs2/localalloc.c
73120+++ b/fs/ocfs2/localalloc.c
73121@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
73122 goto bail;
73123 }
73124
73125- atomic_inc(&osb->alloc_stats.moves);
73126+ atomic_inc_unchecked(&osb->alloc_stats.moves);
73127
73128 bail:
73129 if (handle)
73130diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
73131index 460c6c3..b4ef513 100644
73132--- a/fs/ocfs2/ocfs2.h
73133+++ b/fs/ocfs2/ocfs2.h
73134@@ -247,11 +247,11 @@ enum ocfs2_vol_state
73135
73136 struct ocfs2_alloc_stats
73137 {
73138- atomic_t moves;
73139- atomic_t local_data;
73140- atomic_t bitmap_data;
73141- atomic_t bg_allocs;
73142- atomic_t bg_extends;
73143+ atomic_unchecked_t moves;
73144+ atomic_unchecked_t local_data;
73145+ atomic_unchecked_t bitmap_data;
73146+ atomic_unchecked_t bg_allocs;
73147+ atomic_unchecked_t bg_extends;
73148 };
73149
73150 enum ocfs2_local_alloc_state
73151diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
73152index ee541f9..df3a500 100644
73153--- a/fs/ocfs2/refcounttree.c
73154+++ b/fs/ocfs2/refcounttree.c
73155@@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
73156 error = posix_acl_create(dir, &mode, &default_acl, &acl);
73157 if (error) {
73158 mlog_errno(error);
73159- goto out;
73160+ return error;
73161 }
73162
73163 error = ocfs2_create_inode_in_orphan(dir, mode,
73164diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
73165index 0cb889a..6a26b24 100644
73166--- a/fs/ocfs2/suballoc.c
73167+++ b/fs/ocfs2/suballoc.c
73168@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
73169 mlog_errno(status);
73170 goto bail;
73171 }
73172- atomic_inc(&osb->alloc_stats.bg_extends);
73173+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
73174
73175 /* You should never ask for this much metadata */
73176 BUG_ON(bits_wanted >
73177@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
73178 mlog_errno(status);
73179 goto bail;
73180 }
73181- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73182+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73183
73184 *suballoc_loc = res.sr_bg_blkno;
73185 *suballoc_bit_start = res.sr_bit_offset;
73186@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
73187 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
73188 res->sr_bits);
73189
73190- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73191+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73192
73193 BUG_ON(res->sr_bits != 1);
73194
73195@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
73196 mlog_errno(status);
73197 goto bail;
73198 }
73199- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73200+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73201
73202 BUG_ON(res.sr_bits != 1);
73203
73204@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
73205 cluster_start,
73206 num_clusters);
73207 if (!status)
73208- atomic_inc(&osb->alloc_stats.local_data);
73209+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
73210 } else {
73211 if (min_clusters > (osb->bitmap_cpg - 1)) {
73212 /* The only paths asking for contiguousness
73213@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
73214 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
73215 res.sr_bg_blkno,
73216 res.sr_bit_offset);
73217- atomic_inc(&osb->alloc_stats.bitmap_data);
73218+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
73219 *num_clusters = res.sr_bits;
73220 }
73221 }
73222diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
73223index 2667518..24bcf79 100644
73224--- a/fs/ocfs2/super.c
73225+++ b/fs/ocfs2/super.c
73226@@ -308,11 +308,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
73227 "%10s => GlobalAllocs: %d LocalAllocs: %d "
73228 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
73229 "Stats",
73230- atomic_read(&osb->alloc_stats.bitmap_data),
73231- atomic_read(&osb->alloc_stats.local_data),
73232- atomic_read(&osb->alloc_stats.bg_allocs),
73233- atomic_read(&osb->alloc_stats.moves),
73234- atomic_read(&osb->alloc_stats.bg_extends));
73235+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
73236+ atomic_read_unchecked(&osb->alloc_stats.local_data),
73237+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
73238+ atomic_read_unchecked(&osb->alloc_stats.moves),
73239+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
73240
73241 out += snprintf(buf + out, len - out,
73242 "%10s => State: %u Descriptor: %llu Size: %u bits "
73243@@ -2093,11 +2093,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
73244
73245 mutex_init(&osb->system_file_mutex);
73246
73247- atomic_set(&osb->alloc_stats.moves, 0);
73248- atomic_set(&osb->alloc_stats.local_data, 0);
73249- atomic_set(&osb->alloc_stats.bitmap_data, 0);
73250- atomic_set(&osb->alloc_stats.bg_allocs, 0);
73251- atomic_set(&osb->alloc_stats.bg_extends, 0);
73252+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
73253+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
73254+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
73255+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
73256+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
73257
73258 /* Copy the blockcheck stats from the superblock probe */
73259 osb->osb_ecc_stats = *stats;
73260diff --git a/fs/open.c b/fs/open.c
73261index 44a3be1..5e97aa1 100644
73262--- a/fs/open.c
73263+++ b/fs/open.c
73264@@ -32,6 +32,8 @@
73265 #include <linux/dnotify.h>
73266 #include <linux/compat.h>
73267
73268+#define CREATE_TRACE_POINTS
73269+#include <trace/events/fs.h>
73270 #include "internal.h"
73271
73272 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
73273@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
73274 error = locks_verify_truncate(inode, NULL, length);
73275 if (!error)
73276 error = security_path_truncate(path);
73277+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
73278+ error = -EACCES;
73279 if (!error)
73280 error = do_truncate(path->dentry, length, 0, NULL);
73281
73282@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
73283 error = locks_verify_truncate(inode, f.file, length);
73284 if (!error)
73285 error = security_path_truncate(&f.file->f_path);
73286+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
73287+ error = -EACCES;
73288 if (!error)
73289 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
73290 sb_end_write(inode->i_sb);
73291@@ -392,6 +398,9 @@ retry:
73292 if (__mnt_is_readonly(path.mnt))
73293 res = -EROFS;
73294
73295+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
73296+ res = -EACCES;
73297+
73298 out_path_release:
73299 path_put(&path);
73300 if (retry_estale(res, lookup_flags)) {
73301@@ -423,6 +432,8 @@ retry:
73302 if (error)
73303 goto dput_and_out;
73304
73305+ gr_log_chdir(path.dentry, path.mnt);
73306+
73307 set_fs_pwd(current->fs, &path);
73308
73309 dput_and_out:
73310@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
73311 goto out_putf;
73312
73313 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
73314+
73315+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
73316+ error = -EPERM;
73317+
73318+ if (!error)
73319+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
73320+
73321 if (!error)
73322 set_fs_pwd(current->fs, &f.file->f_path);
73323 out_putf:
73324@@ -481,7 +499,13 @@ retry:
73325 if (error)
73326 goto dput_and_out;
73327
73328+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
73329+ goto dput_and_out;
73330+
73331 set_fs_root(current->fs, &path);
73332+
73333+ gr_handle_chroot_chdir(&path);
73334+
73335 error = 0;
73336 dput_and_out:
73337 path_put(&path);
73338@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
73339 return error;
73340 retry_deleg:
73341 mutex_lock(&inode->i_mutex);
73342+
73343+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
73344+ error = -EACCES;
73345+ goto out_unlock;
73346+ }
73347+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
73348+ error = -EACCES;
73349+ goto out_unlock;
73350+ }
73351+
73352 error = security_path_chmod(path, mode);
73353 if (error)
73354 goto out_unlock;
73355@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
73356 uid = make_kuid(current_user_ns(), user);
73357 gid = make_kgid(current_user_ns(), group);
73358
73359+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
73360+ return -EACCES;
73361+
73362 retry_deleg:
73363 newattrs.ia_valid = ATTR_CTIME;
73364 if (user != (uid_t) -1) {
73365@@ -1017,6 +1054,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
73366 } else {
73367 fsnotify_open(f);
73368 fd_install(fd, f);
73369+ trace_do_sys_open(tmp->name, flags, mode);
73370 }
73371 }
73372 putname(tmp);
73373diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
73374index 5f0d199..13b74b9 100644
73375--- a/fs/overlayfs/super.c
73376+++ b/fs/overlayfs/super.c
73377@@ -172,7 +172,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
73378 {
73379 struct ovl_entry *oe = dentry->d_fsdata;
73380
73381- *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
73382+ *path = oe->numlower ? oe->lowerstack[0] : (struct path) { .dentry = NULL, .mnt = NULL };
73383 }
73384
73385 int ovl_want_write(struct dentry *dentry)
73386@@ -816,8 +816,8 @@ static unsigned int ovl_split_lowerdirs(char *str)
73387
73388 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
73389 {
73390- struct path upperpath = { NULL, NULL };
73391- struct path workpath = { NULL, NULL };
73392+ struct path upperpath = { .dentry = NULL, .mnt = NULL };
73393+ struct path workpath = { .dentry = NULL, .mnt = NULL };
73394 struct dentry *root_dentry;
73395 struct ovl_entry *oe;
73396 struct ovl_fs *ufs;
73397diff --git a/fs/pipe.c b/fs/pipe.c
73398index 21981e5..2c0bffb 100644
73399--- a/fs/pipe.c
73400+++ b/fs/pipe.c
73401@@ -37,7 +37,7 @@ unsigned int pipe_max_size = 1048576;
73402 /*
73403 * Minimum pipe size, as required by POSIX
73404 */
73405-unsigned int pipe_min_size = PAGE_SIZE;
73406+unsigned int pipe_min_size __read_only = PAGE_SIZE;
73407
73408 /*
73409 * We use a start+len construction, which provides full use of the
73410@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
73411
73412 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
73413 {
73414- if (pipe->files)
73415+ if (atomic_read(&pipe->files))
73416 mutex_lock_nested(&pipe->mutex, subclass);
73417 }
73418
73419@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
73420
73421 void pipe_unlock(struct pipe_inode_info *pipe)
73422 {
73423- if (pipe->files)
73424+ if (atomic_read(&pipe->files))
73425 mutex_unlock(&pipe->mutex);
73426 }
73427 EXPORT_SYMBOL(pipe_unlock);
73428@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
73429 }
73430 if (bufs) /* More to do? */
73431 continue;
73432- if (!pipe->writers)
73433+ if (!atomic_read(&pipe->writers))
73434 break;
73435- if (!pipe->waiting_writers) {
73436+ if (!atomic_read(&pipe->waiting_writers)) {
73437 /* syscall merging: Usually we must not sleep
73438 * if O_NONBLOCK is set, or if we got some data.
73439 * But if a writer sleeps in kernel space, then
73440@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
73441
73442 __pipe_lock(pipe);
73443
73444- if (!pipe->readers) {
73445+ if (!atomic_read(&pipe->readers)) {
73446 send_sig(SIGPIPE, current, 0);
73447 ret = -EPIPE;
73448 goto out;
73449@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
73450 for (;;) {
73451 int bufs;
73452
73453- if (!pipe->readers) {
73454+ if (!atomic_read(&pipe->readers)) {
73455 send_sig(SIGPIPE, current, 0);
73456 if (!ret)
73457 ret = -EPIPE;
73458@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
73459 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
73460 do_wakeup = 0;
73461 }
73462- pipe->waiting_writers++;
73463+ atomic_inc(&pipe->waiting_writers);
73464 pipe_wait(pipe);
73465- pipe->waiting_writers--;
73466+ atomic_dec(&pipe->waiting_writers);
73467 }
73468 out:
73469 __pipe_unlock(pipe);
73470@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
73471 mask = 0;
73472 if (filp->f_mode & FMODE_READ) {
73473 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
73474- if (!pipe->writers && filp->f_version != pipe->w_counter)
73475+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
73476 mask |= POLLHUP;
73477 }
73478
73479@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
73480 * Most Unices do not set POLLERR for FIFOs but on Linux they
73481 * behave exactly like pipes for poll().
73482 */
73483- if (!pipe->readers)
73484+ if (!atomic_read(&pipe->readers))
73485 mask |= POLLERR;
73486 }
73487
73488@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
73489 int kill = 0;
73490
73491 spin_lock(&inode->i_lock);
73492- if (!--pipe->files) {
73493+ if (atomic_dec_and_test(&pipe->files)) {
73494 inode->i_pipe = NULL;
73495 kill = 1;
73496 }
73497@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
73498
73499 __pipe_lock(pipe);
73500 if (file->f_mode & FMODE_READ)
73501- pipe->readers--;
73502+ atomic_dec(&pipe->readers);
73503 if (file->f_mode & FMODE_WRITE)
73504- pipe->writers--;
73505+ atomic_dec(&pipe->writers);
73506
73507- if (pipe->readers || pipe->writers) {
73508+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
73509 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
73510 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
73511 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
73512@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
73513 kfree(pipe);
73514 }
73515
73516-static struct vfsmount *pipe_mnt __read_mostly;
73517+struct vfsmount *pipe_mnt __read_mostly;
73518
73519 /*
73520 * pipefs_dname() is called from d_path().
73521@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
73522 goto fail_iput;
73523
73524 inode->i_pipe = pipe;
73525- pipe->files = 2;
73526- pipe->readers = pipe->writers = 1;
73527+ atomic_set(&pipe->files, 2);
73528+ atomic_set(&pipe->readers, 1);
73529+ atomic_set(&pipe->writers, 1);
73530 inode->i_fop = &pipefifo_fops;
73531
73532 /*
73533@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
73534 spin_lock(&inode->i_lock);
73535 if (inode->i_pipe) {
73536 pipe = inode->i_pipe;
73537- pipe->files++;
73538+ atomic_inc(&pipe->files);
73539 spin_unlock(&inode->i_lock);
73540 } else {
73541 spin_unlock(&inode->i_lock);
73542 pipe = alloc_pipe_info();
73543 if (!pipe)
73544 return -ENOMEM;
73545- pipe->files = 1;
73546+ atomic_set(&pipe->files, 1);
73547 spin_lock(&inode->i_lock);
73548 if (unlikely(inode->i_pipe)) {
73549- inode->i_pipe->files++;
73550+ atomic_inc(&inode->i_pipe->files);
73551 spin_unlock(&inode->i_lock);
73552 free_pipe_info(pipe);
73553 pipe = inode->i_pipe;
73554@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
73555 * opened, even when there is no process writing the FIFO.
73556 */
73557 pipe->r_counter++;
73558- if (pipe->readers++ == 0)
73559+ if (atomic_inc_return(&pipe->readers) == 1)
73560 wake_up_partner(pipe);
73561
73562- if (!is_pipe && !pipe->writers) {
73563+ if (!is_pipe && !atomic_read(&pipe->writers)) {
73564 if ((filp->f_flags & O_NONBLOCK)) {
73565 /* suppress POLLHUP until we have
73566 * seen a writer */
73567@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
73568 * errno=ENXIO when there is no process reading the FIFO.
73569 */
73570 ret = -ENXIO;
73571- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
73572+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
73573 goto err;
73574
73575 pipe->w_counter++;
73576- if (!pipe->writers++)
73577+ if (atomic_inc_return(&pipe->writers) == 1)
73578 wake_up_partner(pipe);
73579
73580- if (!is_pipe && !pipe->readers) {
73581+ if (!is_pipe && !atomic_read(&pipe->readers)) {
73582 if (wait_for_partner(pipe, &pipe->r_counter))
73583 goto err_wr;
73584 }
73585@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
73586 * the process can at least talk to itself.
73587 */
73588
73589- pipe->readers++;
73590- pipe->writers++;
73591+ atomic_inc(&pipe->readers);
73592+ atomic_inc(&pipe->writers);
73593 pipe->r_counter++;
73594 pipe->w_counter++;
73595- if (pipe->readers == 1 || pipe->writers == 1)
73596+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
73597 wake_up_partner(pipe);
73598 break;
73599
73600@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
73601 return 0;
73602
73603 err_rd:
73604- if (!--pipe->readers)
73605+ if (atomic_dec_and_test(&pipe->readers))
73606 wake_up_interruptible(&pipe->wait);
73607 ret = -ERESTARTSYS;
73608 goto err;
73609
73610 err_wr:
73611- if (!--pipe->writers)
73612+ if (atomic_dec_and_test(&pipe->writers))
73613 wake_up_interruptible(&pipe->wait);
73614 ret = -ERESTARTSYS;
73615 goto err;
73616@@ -1010,7 +1011,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
73617 * Currently we rely on the pipe array holding a power-of-2 number
73618 * of pages.
73619 */
73620-static inline unsigned int round_pipe_size(unsigned int size)
73621+static inline unsigned long round_pipe_size(unsigned long size)
73622 {
73623 unsigned long nr_pages;
73624
73625@@ -1058,13 +1059,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
73626
73627 switch (cmd) {
73628 case F_SETPIPE_SZ: {
73629- unsigned int size, nr_pages;
73630+ unsigned long size, nr_pages;
73631+
73632+ ret = -EINVAL;
73633+ if (arg < pipe_min_size)
73634+ goto out;
73635
73636 size = round_pipe_size(arg);
73637 nr_pages = size >> PAGE_SHIFT;
73638
73639- ret = -EINVAL;
73640- if (!nr_pages)
73641+ if (size < pipe_min_size)
73642 goto out;
73643
73644 if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
73645diff --git a/fs/posix_acl.c b/fs/posix_acl.c
73646index 3a48bb7..403067b 100644
73647--- a/fs/posix_acl.c
73648+++ b/fs/posix_acl.c
73649@@ -20,6 +20,7 @@
73650 #include <linux/xattr.h>
73651 #include <linux/export.h>
73652 #include <linux/user_namespace.h>
73653+#include <linux/grsecurity.h>
73654
73655 struct posix_acl **acl_by_type(struct inode *inode, int type)
73656 {
73657@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
73658 }
73659 }
73660 if (mode_p)
73661- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
73662+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
73663 return not_equiv;
73664 }
73665 EXPORT_SYMBOL(posix_acl_equiv_mode);
73666@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
73667 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
73668 }
73669
73670- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
73671+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
73672 return not_equiv;
73673 }
73674
73675@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
73676 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
73677 int err = -ENOMEM;
73678 if (clone) {
73679+ *mode_p &= ~gr_acl_umask();
73680+
73681 err = posix_acl_create_masq(clone, mode_p);
73682 if (err < 0) {
73683 posix_acl_release(clone);
73684@@ -663,11 +666,12 @@ struct posix_acl *
73685 posix_acl_from_xattr(struct user_namespace *user_ns,
73686 const void *value, size_t size)
73687 {
73688- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
73689- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
73690+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
73691+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
73692 int count;
73693 struct posix_acl *acl;
73694 struct posix_acl_entry *acl_e;
73695+ umode_t umask = gr_acl_umask();
73696
73697 if (!value)
73698 return NULL;
73699@@ -693,12 +697,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
73700
73701 switch(acl_e->e_tag) {
73702 case ACL_USER_OBJ:
73703+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
73704+ break;
73705 case ACL_GROUP_OBJ:
73706 case ACL_MASK:
73707+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
73708+ break;
73709 case ACL_OTHER:
73710+ acl_e->e_perm &= ~(umask & S_IRWXO);
73711 break;
73712
73713 case ACL_USER:
73714+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
73715 acl_e->e_uid =
73716 make_kuid(user_ns,
73717 le32_to_cpu(entry->e_id));
73718@@ -706,6 +716,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
73719 goto fail;
73720 break;
73721 case ACL_GROUP:
73722+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
73723 acl_e->e_gid =
73724 make_kgid(user_ns,
73725 le32_to_cpu(entry->e_id));
73726diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
73727index 2183fcf..3c32a98 100644
73728--- a/fs/proc/Kconfig
73729+++ b/fs/proc/Kconfig
73730@@ -30,7 +30,7 @@ config PROC_FS
73731
73732 config PROC_KCORE
73733 bool "/proc/kcore support" if !ARM
73734- depends on PROC_FS && MMU
73735+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
73736 help
73737 Provides a virtual ELF core file of the live kernel. This can
73738 be read with gdb and other ELF tools. No modifications can be
73739@@ -38,8 +38,8 @@ config PROC_KCORE
73740
73741 config PROC_VMCORE
73742 bool "/proc/vmcore support"
73743- depends on PROC_FS && CRASH_DUMP
73744- default y
73745+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
73746+ default n
73747 help
73748 Exports the dump image of crashed kernel in ELF format.
73749
73750@@ -63,8 +63,8 @@ config PROC_SYSCTL
73751 limited in memory.
73752
73753 config PROC_PAGE_MONITOR
73754- default y
73755- depends on PROC_FS && MMU
73756+ default n
73757+ depends on PROC_FS && MMU && !GRKERNSEC
73758 bool "Enable /proc page monitoring" if EXPERT
73759 help
73760 Various /proc files exist to monitor process memory utilization:
73761diff --git a/fs/proc/array.c b/fs/proc/array.c
73762index 1295a00..4c91a6b 100644
73763--- a/fs/proc/array.c
73764+++ b/fs/proc/array.c
73765@@ -60,6 +60,7 @@
73766 #include <linux/tty.h>
73767 #include <linux/string.h>
73768 #include <linux/mman.h>
73769+#include <linux/grsecurity.h>
73770 #include <linux/proc_fs.h>
73771 #include <linux/ioport.h>
73772 #include <linux/uaccess.h>
73773@@ -322,6 +323,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
73774 cpumask_pr_args(&task->cpus_allowed));
73775 }
73776
73777+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
73778+static inline void task_pax(struct seq_file *m, struct task_struct *p)
73779+{
73780+ if (p->mm)
73781+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
73782+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
73783+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
73784+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
73785+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
73786+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
73787+ else
73788+ seq_printf(m, "PaX:\t-----\n");
73789+}
73790+#endif
73791+
73792 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
73793 struct pid *pid, struct task_struct *task)
73794 {
73795@@ -340,9 +356,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
73796 task_cpus_allowed(m, task);
73797 cpuset_task_status_allowed(m, task);
73798 task_context_switch_counts(m, task);
73799+
73800+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
73801+ task_pax(m, task);
73802+#endif
73803+
73804+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
73805+ task_grsec_rbac(m, task);
73806+#endif
73807+
73808 return 0;
73809 }
73810
73811+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73812+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
73813+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
73814+ _mm->pax_flags & MF_PAX_SEGMEXEC))
73815+#endif
73816+
73817 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73818 struct pid *pid, struct task_struct *task, int whole)
73819 {
73820@@ -364,6 +395,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73821 char tcomm[sizeof(task->comm)];
73822 unsigned long flags;
73823
73824+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73825+ if (current->exec_id != m->exec_id) {
73826+ gr_log_badprocpid("stat");
73827+ return 0;
73828+ }
73829+#endif
73830+
73831 state = *get_task_state(task);
73832 vsize = eip = esp = 0;
73833 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
73834@@ -434,6 +472,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73835 gtime = task_gtime(task);
73836 }
73837
73838+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73839+ if (PAX_RAND_FLAGS(mm)) {
73840+ eip = 0;
73841+ esp = 0;
73842+ wchan = 0;
73843+ }
73844+#endif
73845+#ifdef CONFIG_GRKERNSEC_HIDESYM
73846+ wchan = 0;
73847+ eip =0;
73848+ esp =0;
73849+#endif
73850+
73851 /* scale priority and nice values from timeslices to -20..20 */
73852 /* to make it look like a "normal" Unix priority/nice value */
73853 priority = task_prio(task);
73854@@ -465,9 +516,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73855 seq_put_decimal_ull(m, ' ', vsize);
73856 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
73857 seq_put_decimal_ull(m, ' ', rsslim);
73858+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73859+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
73860+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
73861+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
73862+#else
73863 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
73864 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
73865 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
73866+#endif
73867 seq_put_decimal_ull(m, ' ', esp);
73868 seq_put_decimal_ull(m, ' ', eip);
73869 /* The signal information here is obsolete.
73870@@ -489,7 +546,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73871 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
73872 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
73873
73874- if (mm && permitted) {
73875+ if (mm && permitted
73876+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73877+ && !PAX_RAND_FLAGS(mm)
73878+#endif
73879+ ) {
73880 seq_put_decimal_ull(m, ' ', mm->start_data);
73881 seq_put_decimal_ull(m, ' ', mm->end_data);
73882 seq_put_decimal_ull(m, ' ', mm->start_brk);
73883@@ -527,8 +588,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
73884 struct pid *pid, struct task_struct *task)
73885 {
73886 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
73887- struct mm_struct *mm = get_task_mm(task);
73888+ struct mm_struct *mm;
73889
73890+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73891+ if (current->exec_id != m->exec_id) {
73892+ gr_log_badprocpid("statm");
73893+ return 0;
73894+ }
73895+#endif
73896+ mm = get_task_mm(task);
73897 if (mm) {
73898 size = task_statm(mm, &shared, &text, &data, &resident);
73899 mmput(mm);
73900@@ -551,6 +619,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
73901 return 0;
73902 }
73903
73904+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
73905+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
73906+{
73907+ unsigned long flags;
73908+ u32 curr_ip = 0;
73909+
73910+ if (lock_task_sighand(task, &flags)) {
73911+ curr_ip = task->signal->curr_ip;
73912+ unlock_task_sighand(task, &flags);
73913+ }
73914+ return seq_printf(m, "%pI4\n", &curr_ip);
73915+}
73916+#endif
73917+
73918 #ifdef CONFIG_CHECKPOINT_RESTORE
73919 static struct pid *
73920 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
73921diff --git a/fs/proc/base.c b/fs/proc/base.c
73922index 3f3d7ae..68de109 100644
73923--- a/fs/proc/base.c
73924+++ b/fs/proc/base.c
73925@@ -113,6 +113,14 @@ struct pid_entry {
73926 union proc_op op;
73927 };
73928
73929+struct getdents_callback {
73930+ struct linux_dirent __user * current_dir;
73931+ struct linux_dirent __user * previous;
73932+ struct file * file;
73933+ int count;
73934+ int error;
73935+};
73936+
73937 #define NOD(NAME, MODE, IOP, FOP, OP) { \
73938 .name = (NAME), \
73939 .len = sizeof(NAME) - 1, \
73940@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
73941 return 0;
73942 }
73943
73944+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73945+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
73946+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
73947+ _mm->pax_flags & MF_PAX_SEGMEXEC))
73948+#endif
73949+
73950 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
73951 struct pid *pid, struct task_struct *task)
73952 {
73953 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
73954 if (mm && !IS_ERR(mm)) {
73955 unsigned int nwords = 0;
73956+
73957+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73958+ /* allow if we're currently ptracing this task */
73959+ if (PAX_RAND_FLAGS(mm) &&
73960+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
73961+ mmput(mm);
73962+ return 0;
73963+ }
73964+#endif
73965+
73966 do {
73967 nwords += 2;
73968 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
73969@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
73970 }
73971
73972
73973-#ifdef CONFIG_KALLSYMS
73974+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73975 /*
73976 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
73977 * Returns the resolved symbol. If that fails, simply return the address.
73978@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
73979 mutex_unlock(&task->signal->cred_guard_mutex);
73980 }
73981
73982-#ifdef CONFIG_STACKTRACE
73983+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73984
73985 #define MAX_STACK_TRACE_DEPTH 64
73986
73987@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
73988 return 0;
73989 }
73990
73991-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
73992+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
73993 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
73994 struct pid *pid, struct task_struct *task)
73995 {
73996@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
73997 /************************************************************************/
73998
73999 /* permission checks */
74000-static int proc_fd_access_allowed(struct inode *inode)
74001+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
74002 {
74003 struct task_struct *task;
74004 int allowed = 0;
74005@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
74006 */
74007 task = get_proc_task(inode);
74008 if (task) {
74009- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
74010+ if (log)
74011+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
74012+ else
74013+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
74014 put_task_struct(task);
74015 }
74016 return allowed;
74017@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
74018 struct task_struct *task,
74019 int hide_pid_min)
74020 {
74021+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
74022+ return false;
74023+
74024+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74025+ rcu_read_lock();
74026+ {
74027+ const struct cred *tmpcred = current_cred();
74028+ const struct cred *cred = __task_cred(task);
74029+
74030+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
74031+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74032+ || in_group_p(grsec_proc_gid)
74033+#endif
74034+ ) {
74035+ rcu_read_unlock();
74036+ return true;
74037+ }
74038+ }
74039+ rcu_read_unlock();
74040+
74041+ if (!pid->hide_pid)
74042+ return false;
74043+#endif
74044+
74045 if (pid->hide_pid < hide_pid_min)
74046 return true;
74047 if (in_group_p(pid->pid_gid))
74048 return true;
74049+
74050 return ptrace_may_access(task, PTRACE_MODE_READ);
74051 }
74052
74053@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
74054 put_task_struct(task);
74055
74056 if (!has_perms) {
74057+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74058+ {
74059+#else
74060 if (pid->hide_pid == 2) {
74061+#endif
74062 /*
74063 * Let's make getdents(), stat(), and open()
74064 * consistent with each other. If a process
74065@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
74066
74067 if (task) {
74068 mm = mm_access(task, mode);
74069+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
74070+ mmput(mm);
74071+ mm = ERR_PTR(-EPERM);
74072+ }
74073 put_task_struct(task);
74074
74075 if (!IS_ERR_OR_NULL(mm)) {
74076@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
74077 return PTR_ERR(mm);
74078
74079 file->private_data = mm;
74080+
74081+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74082+ file->f_version = current->exec_id;
74083+#endif
74084+
74085 return 0;
74086 }
74087
74088@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
74089 ssize_t copied;
74090 char *page;
74091
74092+#ifdef CONFIG_GRKERNSEC
74093+ if (write)
74094+ return -EPERM;
74095+#endif
74096+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74097+ if (file->f_version != current->exec_id) {
74098+ gr_log_badprocpid("mem");
74099+ return 0;
74100+ }
74101+#endif
74102+
74103 if (!mm)
74104 return 0;
74105
74106@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
74107 goto free;
74108
74109 while (count > 0) {
74110- int this_len = min_t(int, count, PAGE_SIZE);
74111+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
74112
74113 if (write && copy_from_user(page, buf, this_len)) {
74114 copied = -EFAULT;
74115@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
74116 if (!mm)
74117 return 0;
74118
74119+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74120+ if (file->f_version != current->exec_id) {
74121+ gr_log_badprocpid("environ");
74122+ return 0;
74123+ }
74124+#endif
74125+
74126 page = (char *)__get_free_page(GFP_TEMPORARY);
74127 if (!page)
74128 return -ENOMEM;
74129@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
74130 goto free;
74131 while (count > 0) {
74132 size_t this_len, max_len;
74133- int retval;
74134+ ssize_t retval;
74135
74136 if (src >= (mm->env_end - mm->env_start))
74137 break;
74138@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
74139 int error = -EACCES;
74140
74141 /* Are we allowed to snoop on the tasks file descriptors? */
74142- if (!proc_fd_access_allowed(inode))
74143+ if (!proc_fd_access_allowed(inode, 0))
74144 goto out;
74145
74146 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
74147@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
74148 struct path path;
74149
74150 /* Are we allowed to snoop on the tasks file descriptors? */
74151- if (!proc_fd_access_allowed(inode))
74152- goto out;
74153+ /* logging this is needed for learning on chromium to work properly,
74154+ but we don't want to flood the logs from 'ps' which does a readlink
74155+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
74156+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
74157+ */
74158+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
74159+ if (!proc_fd_access_allowed(inode,0))
74160+ goto out;
74161+ } else {
74162+ if (!proc_fd_access_allowed(inode,1))
74163+ goto out;
74164+ }
74165
74166 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
74167 if (error)
74168@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
74169 rcu_read_lock();
74170 cred = __task_cred(task);
74171 inode->i_uid = cred->euid;
74172+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74173+ inode->i_gid = grsec_proc_gid;
74174+#else
74175 inode->i_gid = cred->egid;
74176+#endif
74177 rcu_read_unlock();
74178 }
74179 security_task_to_inode(task, inode);
74180@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
74181 return -ENOENT;
74182 }
74183 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
74184+#ifdef CONFIG_GRKERNSEC_PROC_USER
74185+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
74186+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74187+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
74188+#endif
74189 task_dumpable(task)) {
74190 cred = __task_cred(task);
74191 stat->uid = cred->euid;
74192+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74193+ stat->gid = grsec_proc_gid;
74194+#else
74195 stat->gid = cred->egid;
74196+#endif
74197 }
74198 }
74199 rcu_read_unlock();
74200@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
74201
74202 if (task) {
74203 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
74204+#ifdef CONFIG_GRKERNSEC_PROC_USER
74205+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
74206+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74207+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
74208+#endif
74209 task_dumpable(task)) {
74210 rcu_read_lock();
74211 cred = __task_cred(task);
74212 inode->i_uid = cred->euid;
74213+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74214+ inode->i_gid = grsec_proc_gid;
74215+#else
74216 inode->i_gid = cred->egid;
74217+#endif
74218 rcu_read_unlock();
74219 } else {
74220 inode->i_uid = GLOBAL_ROOT_UID;
74221@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
74222 if (!task)
74223 goto out_no_task;
74224
74225+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
74226+ goto out;
74227+
74228 /*
74229 * Yes, it does not scale. And it should not. Don't add
74230 * new entries into /proc/<tgid>/ without very good reasons.
74231@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
74232 if (!task)
74233 return -ENOENT;
74234
74235+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
74236+ goto out;
74237+
74238 if (!dir_emit_dots(file, ctx))
74239 goto out;
74240
74241@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
74242 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
74243 #endif
74244 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
74245-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
74246+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
74247 ONE("syscall", S_IRUSR, proc_pid_syscall),
74248 #endif
74249 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
74250@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
74251 #ifdef CONFIG_SECURITY
74252 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
74253 #endif
74254-#ifdef CONFIG_KALLSYMS
74255+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74256 ONE("wchan", S_IRUGO, proc_pid_wchan),
74257 #endif
74258-#ifdef CONFIG_STACKTRACE
74259+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74260 ONE("stack", S_IRUSR, proc_pid_stack),
74261 #endif
74262 #ifdef CONFIG_SCHEDSTATS
74263@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
74264 #ifdef CONFIG_HARDWALL
74265 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
74266 #endif
74267+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
74268+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
74269+#endif
74270 #ifdef CONFIG_USER_NS
74271 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
74272 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
74273@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
74274 if (!inode)
74275 goto out;
74276
74277+#ifdef CONFIG_GRKERNSEC_PROC_USER
74278+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
74279+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74280+ inode->i_gid = grsec_proc_gid;
74281+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
74282+#else
74283 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
74284+#endif
74285 inode->i_op = &proc_tgid_base_inode_operations;
74286 inode->i_fop = &proc_tgid_base_operations;
74287 inode->i_flags|=S_IMMUTABLE;
74288@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
74289 if (!task)
74290 goto out;
74291
74292+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
74293+ goto out_put_task;
74294+
74295 result = proc_pid_instantiate(dir, dentry, task, NULL);
74296+out_put_task:
74297 put_task_struct(task);
74298 out:
74299 return ERR_PTR(result);
74300@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
74301 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
74302 #endif
74303 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
74304-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
74305+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
74306 ONE("syscall", S_IRUSR, proc_pid_syscall),
74307 #endif
74308 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
74309@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
74310 #ifdef CONFIG_SECURITY
74311 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
74312 #endif
74313-#ifdef CONFIG_KALLSYMS
74314+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74315 ONE("wchan", S_IRUGO, proc_pid_wchan),
74316 #endif
74317-#ifdef CONFIG_STACKTRACE
74318+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74319 ONE("stack", S_IRUSR, proc_pid_stack),
74320 #endif
74321 #ifdef CONFIG_SCHEDSTATS
74322diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
74323index cbd82df..c0407d2 100644
74324--- a/fs/proc/cmdline.c
74325+++ b/fs/proc/cmdline.c
74326@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
74327
74328 static int __init proc_cmdline_init(void)
74329 {
74330+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74331+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
74332+#else
74333 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
74334+#endif
74335 return 0;
74336 }
74337 fs_initcall(proc_cmdline_init);
74338diff --git a/fs/proc/devices.c b/fs/proc/devices.c
74339index 50493ed..248166b 100644
74340--- a/fs/proc/devices.c
74341+++ b/fs/proc/devices.c
74342@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
74343
74344 static int __init proc_devices_init(void)
74345 {
74346+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74347+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
74348+#else
74349 proc_create("devices", 0, NULL, &proc_devinfo_operations);
74350+#endif
74351 return 0;
74352 }
74353 fs_initcall(proc_devices_init);
74354diff --git a/fs/proc/fd.c b/fs/proc/fd.c
74355index 8e5ad83..1f07a8c 100644
74356--- a/fs/proc/fd.c
74357+++ b/fs/proc/fd.c
74358@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
74359 if (!task)
74360 return -ENOENT;
74361
74362- files = get_files_struct(task);
74363+ if (!gr_acl_handle_procpidmem(task))
74364+ files = get_files_struct(task);
74365 put_task_struct(task);
74366
74367 if (files) {
74368@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
74369 */
74370 int proc_fd_permission(struct inode *inode, int mask)
74371 {
74372+ struct task_struct *task;
74373 int rv = generic_permission(inode, mask);
74374- if (rv == 0)
74375- return 0;
74376+
74377 if (task_tgid(current) == proc_pid(inode))
74378 rv = 0;
74379+
74380+ task = get_proc_task(inode);
74381+ if (task == NULL)
74382+ return rv;
74383+
74384+ if (gr_acl_handle_procpidmem(task))
74385+ rv = -EACCES;
74386+
74387+ put_task_struct(task);
74388+
74389 return rv;
74390 }
74391
74392diff --git a/fs/proc/generic.c b/fs/proc/generic.c
74393index be65b20..2998ba8 100644
74394--- a/fs/proc/generic.c
74395+++ b/fs/proc/generic.c
74396@@ -22,6 +22,7 @@
74397 #include <linux/bitops.h>
74398 #include <linux/spinlock.h>
74399 #include <linux/completion.h>
74400+#include <linux/grsecurity.h>
74401 #include <asm/uaccess.h>
74402
74403 #include "internal.h"
74404@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
74405 return proc_lookup_de(PDE(dir), dir, dentry);
74406 }
74407
74408+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
74409+ unsigned int flags)
74410+{
74411+ if (gr_proc_is_restricted())
74412+ return ERR_PTR(-EACCES);
74413+
74414+ return proc_lookup_de(PDE(dir), dir, dentry);
74415+}
74416+
74417 /*
74418 * This returns non-zero if at EOF, so that the /proc
74419 * root directory can use this and check if it should
74420@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
74421 return proc_readdir_de(PDE(inode), file, ctx);
74422 }
74423
74424+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
74425+{
74426+ struct inode *inode = file_inode(file);
74427+
74428+ if (gr_proc_is_restricted())
74429+ return -EACCES;
74430+
74431+ return proc_readdir_de(PDE(inode), file, ctx);
74432+}
74433+
74434 /*
74435 * These are the generic /proc directory operations. They
74436 * use the in-memory "struct proc_dir_entry" tree to parse
74437@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
74438 .iterate = proc_readdir,
74439 };
74440
74441+static const struct file_operations proc_dir_restricted_operations = {
74442+ .llseek = generic_file_llseek,
74443+ .read = generic_read_dir,
74444+ .iterate = proc_readdir_restrict,
74445+};
74446+
74447 /*
74448 * proc directories can do almost nothing..
74449 */
74450@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
74451 .setattr = proc_notify_change,
74452 };
74453
74454+static const struct inode_operations proc_dir_restricted_inode_operations = {
74455+ .lookup = proc_lookup_restrict,
74456+ .getattr = proc_getattr,
74457+ .setattr = proc_notify_change,
74458+};
74459+
74460 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
74461 {
74462 int ret;
74463@@ -441,6 +473,31 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
74464 }
74465 EXPORT_SYMBOL_GPL(proc_mkdir_data);
74466
74467+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
74468+ struct proc_dir_entry *parent, void *data)
74469+{
74470+ struct proc_dir_entry *ent;
74471+
74472+ if (mode == 0)
74473+ mode = S_IRUGO | S_IXUGO;
74474+
74475+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
74476+ if (ent) {
74477+ ent->data = data;
74478+ ent->restricted = 1;
74479+ ent->proc_fops = &proc_dir_restricted_operations;
74480+ ent->proc_iops = &proc_dir_restricted_inode_operations;
74481+ parent->nlink++;
74482+ if (proc_register(parent, ent) < 0) {
74483+ kfree(ent);
74484+ parent->nlink--;
74485+ ent = NULL;
74486+ }
74487+ }
74488+ return ent;
74489+}
74490+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
74491+
74492 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
74493 struct proc_dir_entry *parent)
74494 {
74495@@ -455,6 +512,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
74496 }
74497 EXPORT_SYMBOL(proc_mkdir);
74498
74499+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
74500+ struct proc_dir_entry *parent)
74501+{
74502+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
74503+}
74504+EXPORT_SYMBOL(proc_mkdir_restrict);
74505+
74506 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
74507 struct proc_dir_entry *parent,
74508 const struct file_operations *proc_fops,
74509diff --git a/fs/proc/inode.c b/fs/proc/inode.c
74510index 7697b66..8d8e541 100644
74511--- a/fs/proc/inode.c
74512+++ b/fs/proc/inode.c
74513@@ -24,11 +24,17 @@
74514 #include <linux/mount.h>
74515 #include <linux/magic.h>
74516 #include <linux/namei.h>
74517+#include <linux/grsecurity.h>
74518
74519 #include <asm/uaccess.h>
74520
74521 #include "internal.h"
74522
74523+#ifdef CONFIG_PROC_SYSCTL
74524+extern const struct inode_operations proc_sys_inode_operations;
74525+extern const struct inode_operations proc_sys_dir_operations;
74526+#endif
74527+
74528 static void proc_evict_inode(struct inode *inode)
74529 {
74530 struct proc_dir_entry *de;
74531@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
74532 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
74533 sysctl_head_put(head);
74534 }
74535+
74536+#ifdef CONFIG_PROC_SYSCTL
74537+ if (inode->i_op == &proc_sys_inode_operations ||
74538+ inode->i_op == &proc_sys_dir_operations)
74539+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
74540+#endif
74541+
74542 }
74543
74544 static struct kmem_cache * proc_inode_cachep;
74545@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
74546 if (de->mode) {
74547 inode->i_mode = de->mode;
74548 inode->i_uid = de->uid;
74549+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74550+ inode->i_gid = grsec_proc_gid;
74551+#else
74552 inode->i_gid = de->gid;
74553+#endif
74554 }
74555 if (de->size)
74556 inode->i_size = de->size;
74557diff --git a/fs/proc/internal.h b/fs/proc/internal.h
74558index c835b94..c9e01a3 100644
74559--- a/fs/proc/internal.h
74560+++ b/fs/proc/internal.h
74561@@ -47,9 +47,10 @@ struct proc_dir_entry {
74562 struct completion *pde_unload_completion;
74563 struct list_head pde_openers; /* who did ->open, but not ->release */
74564 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
74565+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
74566 u8 namelen;
74567 char name[];
74568-};
74569+} __randomize_layout;
74570
74571 union proc_op {
74572 int (*proc_get_link)(struct dentry *, struct path *);
74573@@ -67,7 +68,7 @@ struct proc_inode {
74574 struct ctl_table *sysctl_entry;
74575 const struct proc_ns_operations *ns_ops;
74576 struct inode vfs_inode;
74577-};
74578+} __randomize_layout;
74579
74580 /*
74581 * General functions
74582@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
74583 struct pid *, struct task_struct *);
74584 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
74585 struct pid *, struct task_struct *);
74586+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
74587+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
74588+ struct pid *, struct task_struct *);
74589+#endif
74590
74591 /*
74592 * base.c
74593@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
74594 * generic.c
74595 */
74596 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
74597+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
74598 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
74599 struct dentry *);
74600 extern int proc_readdir(struct file *, struct dir_context *);
74601+extern int proc_readdir_restrict(struct file *, struct dir_context *);
74602 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
74603
74604 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
74605diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
74606index a352d57..cb94a5c 100644
74607--- a/fs/proc/interrupts.c
74608+++ b/fs/proc/interrupts.c
74609@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
74610
74611 static int __init proc_interrupts_init(void)
74612 {
74613+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74614+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
74615+#else
74616 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
74617+#endif
74618 return 0;
74619 }
74620 fs_initcall(proc_interrupts_init);
74621diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
74622index 91a4e64..14bf8fa 100644
74623--- a/fs/proc/kcore.c
74624+++ b/fs/proc/kcore.c
74625@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
74626 * the addresses in the elf_phdr on our list.
74627 */
74628 start = kc_offset_to_vaddr(*fpos - elf_buflen);
74629- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
74630+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
74631+ if (tsz > buflen)
74632 tsz = buflen;
74633-
74634+
74635 while (buflen) {
74636 struct kcore_list *m;
74637
74638@@ -515,19 +516,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
74639 } else {
74640 if (kern_addr_valid(start)) {
74641 unsigned long n;
74642+ char *elf_buf;
74643+ mm_segment_t oldfs;
74644
74645- n = copy_to_user(buffer, (char *)start, tsz);
74646- /*
74647- * We cannot distinguish between fault on source
74648- * and fault on destination. When this happens
74649- * we clear too and hope it will trigger the
74650- * EFAULT again.
74651- */
74652- if (n) {
74653- if (clear_user(buffer + tsz - n,
74654- n))
74655- return -EFAULT;
74656- }
74657+ elf_buf = kzalloc(tsz, GFP_KERNEL);
74658+ if (!elf_buf)
74659+ return -ENOMEM;
74660+ oldfs = get_fs();
74661+ set_fs(KERNEL_DS);
74662+ n = __copy_from_user(elf_buf, (const void __user *)start, tsz);
74663+ set_fs(oldfs);
74664+ n = copy_to_user(buffer, elf_buf, tsz);
74665+ kfree(elf_buf);
74666+ if (n)
74667+ return -EFAULT;
74668 } else {
74669 if (clear_user(buffer, tsz))
74670 return -EFAULT;
74671@@ -547,6 +549,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
74672
74673 static int open_kcore(struct inode *inode, struct file *filp)
74674 {
74675+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
74676+ return -EPERM;
74677+#endif
74678 if (!capable(CAP_SYS_RAWIO))
74679 return -EPERM;
74680 if (kcore_need_update)
74681@@ -580,7 +585,7 @@ static int __meminit kcore_callback(struct notifier_block *self,
74682 return NOTIFY_OK;
74683 }
74684
74685-static struct notifier_block kcore_callback_nb __meminitdata = {
74686+static struct notifier_block kcore_callback_nb __meminitconst = {
74687 .notifier_call = kcore_callback,
74688 .priority = 0,
74689 };
74690diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
74691index d3ebf2e..6ad42d1 100644
74692--- a/fs/proc/meminfo.c
74693+++ b/fs/proc/meminfo.c
74694@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
74695 vmi.used >> 10,
74696 vmi.largest_chunk >> 10
74697 #ifdef CONFIG_MEMORY_FAILURE
74698- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
74699+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
74700 #endif
74701 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
74702 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
74703diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
74704index d4a3574..b421ce9 100644
74705--- a/fs/proc/nommu.c
74706+++ b/fs/proc/nommu.c
74707@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
74708
74709 if (file) {
74710 seq_pad(m, ' ');
74711- seq_path(m, &file->f_path, "");
74712+ seq_path(m, &file->f_path, "\n\\");
74713 }
74714
74715 seq_putc(m, '\n');
74716diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
74717index 1bde894..22ac7eb 100644
74718--- a/fs/proc/proc_net.c
74719+++ b/fs/proc/proc_net.c
74720@@ -23,9 +23,27 @@
74721 #include <linux/nsproxy.h>
74722 #include <net/net_namespace.h>
74723 #include <linux/seq_file.h>
74724+#include <linux/grsecurity.h>
74725
74726 #include "internal.h"
74727
74728+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
74729+static struct seq_operations *ipv6_seq_ops_addr;
74730+
74731+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
74732+{
74733+ ipv6_seq_ops_addr = addr;
74734+}
74735+
74736+void unregister_ipv6_seq_ops_addr(void)
74737+{
74738+ ipv6_seq_ops_addr = NULL;
74739+}
74740+
74741+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
74742+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
74743+#endif
74744+
74745 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
74746 {
74747 return pde->parent->data;
74748@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
74749 return maybe_get_net(PDE_NET(PDE(inode)));
74750 }
74751
74752+extern const struct seq_operations dev_seq_ops;
74753+
74754 int seq_open_net(struct inode *ino, struct file *f,
74755 const struct seq_operations *ops, int size)
74756 {
74757@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
74758
74759 BUG_ON(size < sizeof(*p));
74760
74761+ /* only permit access to /proc/net/dev */
74762+ if (
74763+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
74764+ ops != ipv6_seq_ops_addr &&
74765+#endif
74766+ ops != &dev_seq_ops && gr_proc_is_restricted())
74767+ return -EACCES;
74768+
74769 net = get_proc_net(ino);
74770 if (net == NULL)
74771 return -ENXIO;
74772@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
74773 int err;
74774 struct net *net;
74775
74776+ if (gr_proc_is_restricted())
74777+ return -EACCES;
74778+
74779 err = -ENXIO;
74780 net = get_proc_net(inode);
74781 if (net == NULL)
74782diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
74783index f92d5dd..26398ac 100644
74784--- a/fs/proc/proc_sysctl.c
74785+++ b/fs/proc/proc_sysctl.c
74786@@ -11,13 +11,21 @@
74787 #include <linux/namei.h>
74788 #include <linux/mm.h>
74789 #include <linux/module.h>
74790+#include <linux/nsproxy.h>
74791+#ifdef CONFIG_GRKERNSEC
74792+#include <net/net_namespace.h>
74793+#endif
74794 #include "internal.h"
74795
74796+extern int gr_handle_chroot_sysctl(const int op);
74797+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
74798+ const int op);
74799+
74800 static const struct dentry_operations proc_sys_dentry_operations;
74801 static const struct file_operations proc_sys_file_operations;
74802-static const struct inode_operations proc_sys_inode_operations;
74803+const struct inode_operations proc_sys_inode_operations;
74804 static const struct file_operations proc_sys_dir_file_operations;
74805-static const struct inode_operations proc_sys_dir_operations;
74806+const struct inode_operations proc_sys_dir_operations;
74807
74808 void proc_sys_poll_notify(struct ctl_table_poll *poll)
74809 {
74810@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
74811
74812 err = NULL;
74813 d_set_d_op(dentry, &proc_sys_dentry_operations);
74814+
74815+ gr_handle_proc_create(dentry, inode);
74816+
74817 d_add(dentry, inode);
74818
74819 out:
74820@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
74821 struct inode *inode = file_inode(filp);
74822 struct ctl_table_header *head = grab_header(inode);
74823 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
74824+ int op = write ? MAY_WRITE : MAY_READ;
74825 ssize_t error;
74826 size_t res;
74827
74828@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
74829 * and won't be until we finish.
74830 */
74831 error = -EPERM;
74832- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
74833+ if (sysctl_perm(head, table, op))
74834 goto out;
74835
74836 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
74837@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
74838 if (!table->proc_handler)
74839 goto out;
74840
74841+#ifdef CONFIG_GRKERNSEC
74842+ error = -EPERM;
74843+ if (gr_handle_chroot_sysctl(op))
74844+ goto out;
74845+ dget(filp->f_path.dentry);
74846+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
74847+ dput(filp->f_path.dentry);
74848+ goto out;
74849+ }
74850+ dput(filp->f_path.dentry);
74851+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
74852+ goto out;
74853+ if (write) {
74854+ if (current->nsproxy->net_ns != table->extra2) {
74855+ if (!capable(CAP_SYS_ADMIN))
74856+ goto out;
74857+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
74858+ goto out;
74859+ }
74860+#endif
74861+
74862 /* careful: calling conventions are nasty here */
74863 res = count;
74864 error = table->proc_handler(table, write, buf, &res, ppos);
74865@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
74866 return false;
74867 } else {
74868 d_set_d_op(child, &proc_sys_dentry_operations);
74869+
74870+ gr_handle_proc_create(child, inode);
74871+
74872 d_add(child, inode);
74873 }
74874 } else {
74875@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
74876 if ((*pos)++ < ctx->pos)
74877 return true;
74878
74879+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
74880+ return 0;
74881+
74882 if (unlikely(S_ISLNK(table->mode)))
74883 res = proc_sys_link_fill_cache(file, ctx, head, table);
74884 else
74885@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
74886 if (IS_ERR(head))
74887 return PTR_ERR(head);
74888
74889+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
74890+ return -ENOENT;
74891+
74892 generic_fillattr(inode, stat);
74893 if (table)
74894 stat->mode = (stat->mode & S_IFMT) | table->mode;
74895@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
74896 .llseek = generic_file_llseek,
74897 };
74898
74899-static const struct inode_operations proc_sys_inode_operations = {
74900+const struct inode_operations proc_sys_inode_operations = {
74901 .permission = proc_sys_permission,
74902 .setattr = proc_sys_setattr,
74903 .getattr = proc_sys_getattr,
74904 };
74905
74906-static const struct inode_operations proc_sys_dir_operations = {
74907+const struct inode_operations proc_sys_dir_operations = {
74908 .lookup = proc_sys_lookup,
74909 .permission = proc_sys_permission,
74910 .setattr = proc_sys_setattr,
74911@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
74912 static struct ctl_dir *new_dir(struct ctl_table_set *set,
74913 const char *name, int namelen)
74914 {
74915- struct ctl_table *table;
74916+ ctl_table_no_const *table;
74917 struct ctl_dir *new;
74918 struct ctl_node *node;
74919 char *new_name;
74920@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
74921 return NULL;
74922
74923 node = (struct ctl_node *)(new + 1);
74924- table = (struct ctl_table *)(node + 1);
74925+ table = (ctl_table_no_const *)(node + 1);
74926 new_name = (char *)(table + 2);
74927 memcpy(new_name, name, namelen);
74928 new_name[namelen] = '\0';
74929@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
74930 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
74931 struct ctl_table_root *link_root)
74932 {
74933- struct ctl_table *link_table, *entry, *link;
74934+ ctl_table_no_const *link_table, *link;
74935+ struct ctl_table *entry;
74936 struct ctl_table_header *links;
74937 struct ctl_node *node;
74938 char *link_name;
74939@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
74940 return NULL;
74941
74942 node = (struct ctl_node *)(links + 1);
74943- link_table = (struct ctl_table *)(node + nr_entries);
74944+ link_table = (ctl_table_no_const *)(node + nr_entries);
74945 link_name = (char *)&link_table[nr_entries + 1];
74946
74947 for (link = link_table, entry = table; entry->procname; link++, entry++) {
74948@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
74949 struct ctl_table_header ***subheader, struct ctl_table_set *set,
74950 struct ctl_table *table)
74951 {
74952- struct ctl_table *ctl_table_arg = NULL;
74953- struct ctl_table *entry, *files;
74954+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
74955+ struct ctl_table *entry;
74956 int nr_files = 0;
74957 int nr_dirs = 0;
74958 int err = -ENOMEM;
74959@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
74960 nr_files++;
74961 }
74962
74963- files = table;
74964 /* If there are mixed files and directories we need a new table */
74965 if (nr_dirs && nr_files) {
74966- struct ctl_table *new;
74967+ ctl_table_no_const *new;
74968 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
74969 GFP_KERNEL);
74970 if (!files)
74971@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
74972 /* Register everything except a directory full of subdirectories */
74973 if (nr_files || !nr_dirs) {
74974 struct ctl_table_header *header;
74975- header = __register_sysctl_table(set, path, files);
74976+ header = __register_sysctl_table(set, path, files ? files : table);
74977 if (!header) {
74978 kfree(ctl_table_arg);
74979 goto out;
74980diff --git a/fs/proc/root.c b/fs/proc/root.c
74981index e74ac9f..35e89f4 100644
74982--- a/fs/proc/root.c
74983+++ b/fs/proc/root.c
74984@@ -188,7 +188,15 @@ void __init proc_root_init(void)
74985 proc_mkdir("openprom", NULL);
74986 #endif
74987 proc_tty_init();
74988+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74989+#ifdef CONFIG_GRKERNSEC_PROC_USER
74990+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
74991+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74992+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
74993+#endif
74994+#else
74995 proc_mkdir("bus", NULL);
74996+#endif
74997 proc_sys_init();
74998 }
74999
75000diff --git a/fs/proc/stat.c b/fs/proc/stat.c
75001index 510413eb..34d9a8c 100644
75002--- a/fs/proc/stat.c
75003+++ b/fs/proc/stat.c
75004@@ -11,6 +11,7 @@
75005 #include <linux/irqnr.h>
75006 #include <linux/cputime.h>
75007 #include <linux/tick.h>
75008+#include <linux/grsecurity.h>
75009
75010 #ifndef arch_irq_stat_cpu
75011 #define arch_irq_stat_cpu(cpu) 0
75012@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
75013 u64 sum_softirq = 0;
75014 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
75015 struct timespec boottime;
75016+ int unrestricted = 1;
75017+
75018+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75019+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75020+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
75021+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
75022+ && !in_group_p(grsec_proc_gid)
75023+#endif
75024+ )
75025+ unrestricted = 0;
75026+#endif
75027+#endif
75028
75029 user = nice = system = idle = iowait =
75030 irq = softirq = steal = 0;
75031@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
75032 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
75033 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
75034 idle += get_idle_time(i);
75035- iowait += get_iowait_time(i);
75036- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
75037- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
75038- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
75039- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
75040- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
75041- sum += kstat_cpu_irqs_sum(i);
75042- sum += arch_irq_stat_cpu(i);
75043+ if (unrestricted) {
75044+ iowait += get_iowait_time(i);
75045+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
75046+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
75047+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
75048+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
75049+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
75050+ sum += kstat_cpu_irqs_sum(i);
75051+ sum += arch_irq_stat_cpu(i);
75052+ for (j = 0; j < NR_SOFTIRQS; j++) {
75053+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
75054
75055- for (j = 0; j < NR_SOFTIRQS; j++) {
75056- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
75057-
75058- per_softirq_sums[j] += softirq_stat;
75059- sum_softirq += softirq_stat;
75060+ per_softirq_sums[j] += softirq_stat;
75061+ sum_softirq += softirq_stat;
75062+ }
75063 }
75064 }
75065- sum += arch_irq_stat();
75066+ if (unrestricted)
75067+ sum += arch_irq_stat();
75068
75069 seq_puts(p, "cpu ");
75070 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
75071@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
75072 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
75073 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
75074 idle = get_idle_time(i);
75075- iowait = get_iowait_time(i);
75076- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
75077- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
75078- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
75079- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
75080- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
75081+ if (unrestricted) {
75082+ iowait = get_iowait_time(i);
75083+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
75084+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
75085+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
75086+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
75087+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
75088+ }
75089 seq_printf(p, "cpu%d", i);
75090 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
75091 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
75092@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
75093
75094 /* sum again ? it could be updated? */
75095 for_each_irq_nr(j)
75096- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
75097+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
75098
75099 seq_printf(p,
75100 "\nctxt %llu\n"
75101@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
75102 "processes %lu\n"
75103 "procs_running %lu\n"
75104 "procs_blocked %lu\n",
75105- nr_context_switches(),
75106+ unrestricted ? nr_context_switches() : 0ULL,
75107 (unsigned long)jif,
75108- total_forks,
75109- nr_running(),
75110- nr_iowait());
75111+ unrestricted ? total_forks : 0UL,
75112+ unrestricted ? nr_running() : 0UL,
75113+ unrestricted ? nr_iowait() : 0UL);
75114
75115 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
75116
75117diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
75118index 6dee68d..1b4add0 100644
75119--- a/fs/proc/task_mmu.c
75120+++ b/fs/proc/task_mmu.c
75121@@ -13,12 +13,19 @@
75122 #include <linux/swap.h>
75123 #include <linux/swapops.h>
75124 #include <linux/mmu_notifier.h>
75125+#include <linux/grsecurity.h>
75126
75127 #include <asm/elf.h>
75128 #include <asm/uaccess.h>
75129 #include <asm/tlbflush.h>
75130 #include "internal.h"
75131
75132+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75133+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
75134+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
75135+ _mm->pax_flags & MF_PAX_SEGMEXEC))
75136+#endif
75137+
75138 void task_mem(struct seq_file *m, struct mm_struct *mm)
75139 {
75140 unsigned long data, text, lib, swap, ptes, pmds;
75141@@ -57,8 +64,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
75142 "VmLib:\t%8lu kB\n"
75143 "VmPTE:\t%8lu kB\n"
75144 "VmPMD:\t%8lu kB\n"
75145- "VmSwap:\t%8lu kB\n",
75146- hiwater_vm << (PAGE_SHIFT-10),
75147+ "VmSwap:\t%8lu kB\n"
75148+
75149+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
75150+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
75151+#endif
75152+
75153+ ,hiwater_vm << (PAGE_SHIFT-10),
75154 total_vm << (PAGE_SHIFT-10),
75155 mm->locked_vm << (PAGE_SHIFT-10),
75156 mm->pinned_vm << (PAGE_SHIFT-10),
75157@@ -68,7 +80,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
75158 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
75159 ptes >> 10,
75160 pmds >> 10,
75161- swap << (PAGE_SHIFT-10));
75162+ swap << (PAGE_SHIFT-10)
75163+
75164+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
75165+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75166+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
75167+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
75168+#else
75169+ , mm->context.user_cs_base
75170+ , mm->context.user_cs_limit
75171+#endif
75172+#endif
75173+
75174+ );
75175 }
75176
75177 unsigned long task_vsize(struct mm_struct *mm)
75178@@ -285,13 +309,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
75179 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
75180 }
75181
75182- /* We don't show the stack guard page in /proc/maps */
75183+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75184+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
75185+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
75186+#else
75187 start = vma->vm_start;
75188- if (stack_guard_page_start(vma, start))
75189- start += PAGE_SIZE;
75190 end = vma->vm_end;
75191- if (stack_guard_page_end(vma, end))
75192- end -= PAGE_SIZE;
75193+#endif
75194
75195 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
75196 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
75197@@ -301,7 +325,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
75198 flags & VM_WRITE ? 'w' : '-',
75199 flags & VM_EXEC ? 'x' : '-',
75200 flags & VM_MAYSHARE ? 's' : 'p',
75201+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75202+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
75203+#else
75204 pgoff,
75205+#endif
75206 MAJOR(dev), MINOR(dev), ino);
75207
75208 /*
75209@@ -310,7 +338,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
75210 */
75211 if (file) {
75212 seq_pad(m, ' ');
75213- seq_path(m, &file->f_path, "\n");
75214+ seq_path(m, &file->f_path, "\n\\");
75215 goto done;
75216 }
75217
75218@@ -341,8 +369,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
75219 * Thread stack in /proc/PID/task/TID/maps or
75220 * the main process stack.
75221 */
75222- if (!is_pid || (vma->vm_start <= mm->start_stack &&
75223- vma->vm_end >= mm->start_stack)) {
75224+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
75225+ (vma->vm_start <= mm->start_stack &&
75226+ vma->vm_end >= mm->start_stack)) {
75227 name = "[stack]";
75228 } else {
75229 /* Thread stack in /proc/PID/maps */
75230@@ -362,6 +391,12 @@ done:
75231
75232 static int show_map(struct seq_file *m, void *v, int is_pid)
75233 {
75234+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75235+ if (current->exec_id != m->exec_id) {
75236+ gr_log_badprocpid("maps");
75237+ return 0;
75238+ }
75239+#endif
75240 show_map_vma(m, v, is_pid);
75241 m_cache_vma(m, v);
75242 return 0;
75243@@ -620,9 +655,18 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
75244 .private = &mss,
75245 };
75246
75247+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75248+ if (current->exec_id != m->exec_id) {
75249+ gr_log_badprocpid("smaps");
75250+ return 0;
75251+ }
75252+#endif
75253 memset(&mss, 0, sizeof mss);
75254- /* mmap_sem is held in m_start */
75255- walk_page_vma(vma, &smaps_walk);
75256+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75257+ if (!PAX_RAND_FLAGS(vma->vm_mm))
75258+#endif
75259+ /* mmap_sem is held in m_start */
75260+ walk_page_vma(vma, &smaps_walk);
75261
75262 show_map_vma(m, vma, is_pid);
75263
75264@@ -641,7 +685,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
75265 "KernelPageSize: %8lu kB\n"
75266 "MMUPageSize: %8lu kB\n"
75267 "Locked: %8lu kB\n",
75268+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75269+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
75270+#else
75271 (vma->vm_end - vma->vm_start) >> 10,
75272+#endif
75273 mss.resident >> 10,
75274 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
75275 mss.shared_clean >> 10,
75276@@ -1491,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
75277 char buffer[64];
75278 int nid;
75279
75280+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75281+ if (current->exec_id != m->exec_id) {
75282+ gr_log_badprocpid("numa_maps");
75283+ return 0;
75284+ }
75285+#endif
75286+
75287 if (!mm)
75288 return 0;
75289
75290@@ -1505,11 +1560,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
75291 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
75292 }
75293
75294+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75295+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
75296+#else
75297 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
75298+#endif
75299
75300 if (file) {
75301 seq_puts(m, " file=");
75302- seq_path(m, &file->f_path, "\n\t= ");
75303+ seq_path(m, &file->f_path, "\n\t\\= ");
75304 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
75305 seq_puts(m, " heap");
75306 } else {
75307diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
75308index 599ec2e..f1413ae 100644
75309--- a/fs/proc/task_nommu.c
75310+++ b/fs/proc/task_nommu.c
75311@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
75312 else
75313 bytes += kobjsize(mm);
75314
75315- if (current->fs && current->fs->users > 1)
75316+ if (current->fs && atomic_read(&current->fs->users) > 1)
75317 sbytes += kobjsize(current->fs);
75318 else
75319 bytes += kobjsize(current->fs);
75320@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
75321
75322 if (file) {
75323 seq_pad(m, ' ');
75324- seq_path(m, &file->f_path, "");
75325+ seq_path(m, &file->f_path, "\n\\");
75326 } else if (mm) {
75327 pid_t tid = pid_of_stack(priv, vma, is_pid);
75328
75329diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
75330index 4e61388..1a2523d 100644
75331--- a/fs/proc/vmcore.c
75332+++ b/fs/proc/vmcore.c
75333@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
75334 nr_bytes = count;
75335
75336 /* If pfn is not ram, return zeros for sparse dump files */
75337- if (pfn_is_ram(pfn) == 0)
75338- memset(buf, 0, nr_bytes);
75339- else {
75340+ if (pfn_is_ram(pfn) == 0) {
75341+ if (userbuf) {
75342+ if (clear_user((char __force_user *)buf, nr_bytes))
75343+ return -EFAULT;
75344+ } else
75345+ memset(buf, 0, nr_bytes);
75346+ } else {
75347 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
75348 offset, userbuf);
75349 if (tmp < 0)
75350@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
75351 static int copy_to(void *target, void *src, size_t size, int userbuf)
75352 {
75353 if (userbuf) {
75354- if (copy_to_user((char __user *) target, src, size))
75355+ if (copy_to_user((char __force_user *) target, src, size))
75356 return -EFAULT;
75357 } else {
75358 memcpy(target, src, size);
75359@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
75360 if (*fpos < m->offset + m->size) {
75361 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
75362 start = m->paddr + *fpos - m->offset;
75363- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
75364+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
75365 if (tmp < 0)
75366 return tmp;
75367 buflen -= tsz;
75368@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
75369 static ssize_t read_vmcore(struct file *file, char __user *buffer,
75370 size_t buflen, loff_t *fpos)
75371 {
75372- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
75373+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
75374 }
75375
75376 /*
75377diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
75378index d3fb2b6..43a8140 100644
75379--- a/fs/qnx6/qnx6.h
75380+++ b/fs/qnx6/qnx6.h
75381@@ -74,7 +74,7 @@ enum {
75382 BYTESEX_BE,
75383 };
75384
75385-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
75386+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
75387 {
75388 if (sbi->s_bytesex == BYTESEX_LE)
75389 return le64_to_cpu((__force __le64)n);
75390@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
75391 return (__force __fs64)cpu_to_be64(n);
75392 }
75393
75394-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
75395+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
75396 {
75397 if (sbi->s_bytesex == BYTESEX_LE)
75398 return le32_to_cpu((__force __le32)n);
75399diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
75400index bb2869f..d34ada8 100644
75401--- a/fs/quota/netlink.c
75402+++ b/fs/quota/netlink.c
75403@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
75404 void quota_send_warning(struct kqid qid, dev_t dev,
75405 const char warntype)
75406 {
75407- static atomic_t seq;
75408+ static atomic_unchecked_t seq;
75409 struct sk_buff *skb;
75410 void *msg_head;
75411 int ret;
75412@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
75413 "VFS: Not enough memory to send quota warning.\n");
75414 return;
75415 }
75416- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
75417+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
75418 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
75419 if (!msg_head) {
75420 printk(KERN_ERR
75421diff --git a/fs/read_write.c b/fs/read_write.c
75422index 8e1b687..bad2eec 100644
75423--- a/fs/read_write.c
75424+++ b/fs/read_write.c
75425@@ -553,7 +553,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
75426
75427 old_fs = get_fs();
75428 set_fs(get_ds());
75429- p = (__force const char __user *)buf;
75430+ p = (const char __force_user *)buf;
75431 if (count > MAX_RW_COUNT)
75432 count = MAX_RW_COUNT;
75433 if (file->f_op->write)
75434diff --git a/fs/readdir.c b/fs/readdir.c
75435index ced6791..936687b 100644
75436--- a/fs/readdir.c
75437+++ b/fs/readdir.c
75438@@ -18,6 +18,7 @@
75439 #include <linux/security.h>
75440 #include <linux/syscalls.h>
75441 #include <linux/unistd.h>
75442+#include <linux/namei.h>
75443
75444 #include <asm/uaccess.h>
75445
75446@@ -71,6 +72,7 @@ struct old_linux_dirent {
75447 struct readdir_callback {
75448 struct dir_context ctx;
75449 struct old_linux_dirent __user * dirent;
75450+ struct file * file;
75451 int result;
75452 };
75453
75454@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
75455 buf->result = -EOVERFLOW;
75456 return -EOVERFLOW;
75457 }
75458+
75459+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
75460+ return 0;
75461+
75462 buf->result++;
75463 dirent = buf->dirent;
75464 if (!access_ok(VERIFY_WRITE, dirent,
75465@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
75466 if (!f.file)
75467 return -EBADF;
75468
75469+ buf.file = f.file;
75470 error = iterate_dir(f.file, &buf.ctx);
75471 if (buf.result)
75472 error = buf.result;
75473@@ -145,6 +152,7 @@ struct getdents_callback {
75474 struct dir_context ctx;
75475 struct linux_dirent __user * current_dir;
75476 struct linux_dirent __user * previous;
75477+ struct file * file;
75478 int count;
75479 int error;
75480 };
75481@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
75482 buf->error = -EOVERFLOW;
75483 return -EOVERFLOW;
75484 }
75485+
75486+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
75487+ return 0;
75488+
75489 dirent = buf->previous;
75490 if (dirent) {
75491 if (__put_user(offset, &dirent->d_off))
75492@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
75493 if (!f.file)
75494 return -EBADF;
75495
75496+ buf.file = f.file;
75497 error = iterate_dir(f.file, &buf.ctx);
75498 if (error >= 0)
75499 error = buf.error;
75500@@ -230,6 +243,7 @@ struct getdents_callback64 {
75501 struct dir_context ctx;
75502 struct linux_dirent64 __user * current_dir;
75503 struct linux_dirent64 __user * previous;
75504+ struct file *file;
75505 int count;
75506 int error;
75507 };
75508@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
75509 buf->error = -EINVAL; /* only used if we fail.. */
75510 if (reclen > buf->count)
75511 return -EINVAL;
75512+
75513+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
75514+ return 0;
75515+
75516 dirent = buf->previous;
75517 if (dirent) {
75518 if (__put_user(offset, &dirent->d_off))
75519@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
75520 if (!f.file)
75521 return -EBADF;
75522
75523+ buf.file = f.file;
75524 error = iterate_dir(f.file, &buf.ctx);
75525 if (error >= 0)
75526 error = buf.error;
75527diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
75528index 9c02d96..6562c10 100644
75529--- a/fs/reiserfs/do_balan.c
75530+++ b/fs/reiserfs/do_balan.c
75531@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
75532 return;
75533 }
75534
75535- atomic_inc(&fs_generation(tb->tb_sb));
75536+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
75537 do_balance_starts(tb);
75538
75539 /*
75540diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
75541index aca73dd..e3c558d 100644
75542--- a/fs/reiserfs/item_ops.c
75543+++ b/fs/reiserfs/item_ops.c
75544@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
75545 }
75546
75547 static struct item_operations errcatch_ops = {
75548- errcatch_bytes_number,
75549- errcatch_decrement_key,
75550- errcatch_is_left_mergeable,
75551- errcatch_print_item,
75552- errcatch_check_item,
75553+ .bytes_number = errcatch_bytes_number,
75554+ .decrement_key = errcatch_decrement_key,
75555+ .is_left_mergeable = errcatch_is_left_mergeable,
75556+ .print_item = errcatch_print_item,
75557+ .check_item = errcatch_check_item,
75558
75559- errcatch_create_vi,
75560- errcatch_check_left,
75561- errcatch_check_right,
75562- errcatch_part_size,
75563- errcatch_unit_num,
75564- errcatch_print_vi
75565+ .create_vi = errcatch_create_vi,
75566+ .check_left = errcatch_check_left,
75567+ .check_right = errcatch_check_right,
75568+ .part_size = errcatch_part_size,
75569+ .unit_num = errcatch_unit_num,
75570+ .print_vi = errcatch_print_vi
75571 };
75572
75573 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
75574diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
75575index 621b9f3..af527fd 100644
75576--- a/fs/reiserfs/procfs.c
75577+++ b/fs/reiserfs/procfs.c
75578@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
75579 "SMALL_TAILS " : "NO_TAILS ",
75580 replay_only(sb) ? "REPLAY_ONLY " : "",
75581 convert_reiserfs(sb) ? "CONV " : "",
75582- atomic_read(&r->s_generation_counter),
75583+ atomic_read_unchecked(&r->s_generation_counter),
75584 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
75585 SF(s_do_balance), SF(s_unneeded_left_neighbor),
75586 SF(s_good_search_by_key_reada), SF(s_bmaps),
75587diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
75588index bb79cdd..fcf49ef 100644
75589--- a/fs/reiserfs/reiserfs.h
75590+++ b/fs/reiserfs/reiserfs.h
75591@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
75592 /* Comment? -Hans */
75593 wait_queue_head_t s_wait;
75594 /* increased by one every time the tree gets re-balanced */
75595- atomic_t s_generation_counter;
75596+ atomic_unchecked_t s_generation_counter;
75597
75598 /* File system properties. Currently holds on-disk FS format */
75599 unsigned long s_properties;
75600@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
75601 #define REISERFS_USER_MEM 1 /* user memory mode */
75602
75603 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
75604-#define get_generation(s) atomic_read (&fs_generation(s))
75605+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
75606 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
75607 #define __fs_changed(gen,s) (gen != get_generation (s))
75608 #define fs_changed(gen,s) \
75609diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
75610index 71fbbe3..eff29ba 100644
75611--- a/fs/reiserfs/super.c
75612+++ b/fs/reiserfs/super.c
75613@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
75614 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
75615 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
75616 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
75617+#ifdef CONFIG_REISERFS_FS_XATTR
75618+ /* turn on user xattrs by default */
75619+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
75620+#endif
75621 /* no preallocation minimum, be smart in reiserfs_file_write instead */
75622 sbi->s_alloc_options.preallocmin = 0;
75623 /* Preallocate by 16 blocks (17-1) at once */
75624diff --git a/fs/select.c b/fs/select.c
75625index f684c75..4117611 100644
75626--- a/fs/select.c
75627+++ b/fs/select.c
75628@@ -20,6 +20,7 @@
75629 #include <linux/export.h>
75630 #include <linux/slab.h>
75631 #include <linux/poll.h>
75632+#include <linux/security.h>
75633 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
75634 #include <linux/file.h>
75635 #include <linux/fdtable.h>
75636@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
75637 struct poll_list *walk = head;
75638 unsigned long todo = nfds;
75639
75640+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
75641 if (nfds > rlimit(RLIMIT_NOFILE))
75642 return -EINVAL;
75643
75644diff --git a/fs/seq_file.c b/fs/seq_file.c
75645index 555f821..34684d7 100644
75646--- a/fs/seq_file.c
75647+++ b/fs/seq_file.c
75648@@ -12,6 +12,8 @@
75649 #include <linux/slab.h>
75650 #include <linux/cred.h>
75651 #include <linux/mm.h>
75652+#include <linux/sched.h>
75653+#include <linux/grsecurity.h>
75654
75655 #include <asm/uaccess.h>
75656 #include <asm/page.h>
75657@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
75658
75659 static void *seq_buf_alloc(unsigned long size)
75660 {
75661- void *buf;
75662-
75663- /*
75664- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
75665- * it's better to fall back to vmalloc() than to kill things.
75666- */
75667- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
75668- if (!buf && size > PAGE_SIZE)
75669- buf = vmalloc(size);
75670- return buf;
75671+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
75672 }
75673
75674 /**
75675@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
75676 #ifdef CONFIG_USER_NS
75677 p->user_ns = file->f_cred->user_ns;
75678 #endif
75679+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75680+ p->exec_id = current->exec_id;
75681+#endif
75682
75683 /*
75684 * Wrappers around seq_open(e.g. swaps_open) need to be
75685@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
75686 }
75687 EXPORT_SYMBOL(seq_open);
75688
75689+
75690+int seq_open_restrict(struct file *file, const struct seq_operations *op)
75691+{
75692+ if (gr_proc_is_restricted())
75693+ return -EACCES;
75694+
75695+ return seq_open(file, op);
75696+}
75697+EXPORT_SYMBOL(seq_open_restrict);
75698+
75699 static int traverse(struct seq_file *m, loff_t offset)
75700 {
75701 loff_t pos = 0, index;
75702@@ -158,7 +164,7 @@ Eoverflow:
75703 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
75704 {
75705 struct seq_file *m = file->private_data;
75706- size_t copied = 0;
75707+ ssize_t copied = 0;
75708 loff_t pos;
75709 size_t n;
75710 void *p;
75711@@ -557,7 +563,7 @@ static void single_stop(struct seq_file *p, void *v)
75712 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
75713 void *data)
75714 {
75715- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
75716+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
75717 int res = -ENOMEM;
75718
75719 if (op) {
75720@@ -593,6 +599,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
75721 }
75722 EXPORT_SYMBOL(single_open_size);
75723
75724+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
75725+ void *data)
75726+{
75727+ if (gr_proc_is_restricted())
75728+ return -EACCES;
75729+
75730+ return single_open(file, show, data);
75731+}
75732+EXPORT_SYMBOL(single_open_restrict);
75733+
75734+
75735 int single_release(struct inode *inode, struct file *file)
75736 {
75737 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
75738diff --git a/fs/splice.c b/fs/splice.c
75739index 7968da9..4ce985b 100644
75740--- a/fs/splice.c
75741+++ b/fs/splice.c
75742@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
75743 pipe_lock(pipe);
75744
75745 for (;;) {
75746- if (!pipe->readers) {
75747+ if (!atomic_read(&pipe->readers)) {
75748 send_sig(SIGPIPE, current, 0);
75749 if (!ret)
75750 ret = -EPIPE;
75751@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
75752 page_nr++;
75753 ret += buf->len;
75754
75755- if (pipe->files)
75756+ if (atomic_read(&pipe->files))
75757 do_wakeup = 1;
75758
75759 if (!--spd->nr_pages)
75760@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
75761 do_wakeup = 0;
75762 }
75763
75764- pipe->waiting_writers++;
75765+ atomic_inc(&pipe->waiting_writers);
75766 pipe_wait(pipe);
75767- pipe->waiting_writers--;
75768+ atomic_dec(&pipe->waiting_writers);
75769 }
75770
75771 pipe_unlock(pipe);
75772@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
75773 old_fs = get_fs();
75774 set_fs(get_ds());
75775 /* The cast to a user pointer is valid due to the set_fs() */
75776- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
75777+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
75778 set_fs(old_fs);
75779
75780 return res;
75781@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
75782 old_fs = get_fs();
75783 set_fs(get_ds());
75784 /* The cast to a user pointer is valid due to the set_fs() */
75785- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
75786+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
75787 set_fs(old_fs);
75788
75789 return res;
75790@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
75791 goto err;
75792
75793 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
75794- vec[i].iov_base = (void __user *) page_address(page);
75795+ vec[i].iov_base = (void __force_user *) page_address(page);
75796 vec[i].iov_len = this_len;
75797 spd.pages[i] = page;
75798 spd.nr_pages++;
75799@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
75800 ops->release(pipe, buf);
75801 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
75802 pipe->nrbufs--;
75803- if (pipe->files)
75804+ if (atomic_read(&pipe->files))
75805 sd->need_wakeup = true;
75806 }
75807
75808@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
75809 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
75810 {
75811 while (!pipe->nrbufs) {
75812- if (!pipe->writers)
75813+ if (!atomic_read(&pipe->writers))
75814 return 0;
75815
75816- if (!pipe->waiting_writers && sd->num_spliced)
75817+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
75818 return 0;
75819
75820 if (sd->flags & SPLICE_F_NONBLOCK)
75821@@ -1025,7 +1025,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
75822 ops->release(pipe, buf);
75823 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
75824 pipe->nrbufs--;
75825- if (pipe->files)
75826+ if (atomic_read(&pipe->files))
75827 sd.need_wakeup = true;
75828 } else {
75829 buf->offset += ret;
75830@@ -1159,7 +1159,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75831 long ret, bytes;
75832 umode_t i_mode;
75833 size_t len;
75834- int i, flags;
75835+ int i, flags, more;
75836
75837 /*
75838 * We require the input being a regular file, as we don't want to
75839@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75840 * out of the pipe right after the splice_to_pipe(). So set
75841 * PIPE_READERS appropriately.
75842 */
75843- pipe->readers = 1;
75844+ atomic_set(&pipe->readers, 1);
75845
75846 current->splice_pipe = pipe;
75847 }
75848@@ -1202,6 +1202,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75849 * Don't block on output, we have to drain the direct pipe.
75850 */
75851 sd->flags &= ~SPLICE_F_NONBLOCK;
75852+ more = sd->flags & SPLICE_F_MORE;
75853
75854 while (len) {
75855 size_t read_len;
75856@@ -1215,6 +1216,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75857 sd->total_len = read_len;
75858
75859 /*
75860+ * If more data is pending, set SPLICE_F_MORE
75861+ * If this is the last data and SPLICE_F_MORE was not set
75862+ * initially, clears it.
75863+ */
75864+ if (read_len < len)
75865+ sd->flags |= SPLICE_F_MORE;
75866+ else if (!more)
75867+ sd->flags &= ~SPLICE_F_MORE;
75868+ /*
75869 * NOTE: nonblocking mode only applies to the input. We
75870 * must not do the output in nonblocking mode as then we
75871 * could get stuck data in the internal pipe:
75872@@ -1482,6 +1492,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
75873
75874 partial[buffers].offset = off;
75875 partial[buffers].len = plen;
75876+ partial[buffers].private = 0;
75877
75878 off = 0;
75879 len -= plen;
75880@@ -1718,9 +1729,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
75881 ret = -ERESTARTSYS;
75882 break;
75883 }
75884- if (!pipe->writers)
75885+ if (!atomic_read(&pipe->writers))
75886 break;
75887- if (!pipe->waiting_writers) {
75888+ if (!atomic_read(&pipe->waiting_writers)) {
75889 if (flags & SPLICE_F_NONBLOCK) {
75890 ret = -EAGAIN;
75891 break;
75892@@ -1752,7 +1763,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
75893 pipe_lock(pipe);
75894
75895 while (pipe->nrbufs >= pipe->buffers) {
75896- if (!pipe->readers) {
75897+ if (!atomic_read(&pipe->readers)) {
75898 send_sig(SIGPIPE, current, 0);
75899 ret = -EPIPE;
75900 break;
75901@@ -1765,9 +1776,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
75902 ret = -ERESTARTSYS;
75903 break;
75904 }
75905- pipe->waiting_writers++;
75906+ atomic_inc(&pipe->waiting_writers);
75907 pipe_wait(pipe);
75908- pipe->waiting_writers--;
75909+ atomic_dec(&pipe->waiting_writers);
75910 }
75911
75912 pipe_unlock(pipe);
75913@@ -1803,14 +1814,14 @@ retry:
75914 pipe_double_lock(ipipe, opipe);
75915
75916 do {
75917- if (!opipe->readers) {
75918+ if (!atomic_read(&opipe->readers)) {
75919 send_sig(SIGPIPE, current, 0);
75920 if (!ret)
75921 ret = -EPIPE;
75922 break;
75923 }
75924
75925- if (!ipipe->nrbufs && !ipipe->writers)
75926+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
75927 break;
75928
75929 /*
75930@@ -1907,7 +1918,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
75931 pipe_double_lock(ipipe, opipe);
75932
75933 do {
75934- if (!opipe->readers) {
75935+ if (!atomic_read(&opipe->readers)) {
75936 send_sig(SIGPIPE, current, 0);
75937 if (!ret)
75938 ret = -EPIPE;
75939@@ -1952,7 +1963,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
75940 * return EAGAIN if we have the potential of some data in the
75941 * future, otherwise just return 0
75942 */
75943- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
75944+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
75945 ret = -EAGAIN;
75946
75947 pipe_unlock(ipipe);
75948diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
75949index 92fcde7..1687329 100644
75950--- a/fs/squashfs/xattr.c
75951+++ b/fs/squashfs/xattr.c
75952@@ -46,8 +46,8 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
75953 + msblk->xattr_table;
75954 int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
75955 int count = squashfs_i(inode)->xattr_count;
75956- size_t rest = buffer_size;
75957- int err;
75958+ size_t used = 0;
75959+ ssize_t err;
75960
75961 /* check that the file system has xattrs */
75962 if (msblk->xattr_id_table == NULL)
75963@@ -68,11 +68,11 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
75964 name_size = le16_to_cpu(entry.size);
75965 handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
75966 if (handler)
75967- prefix_size = handler->list(d, buffer, rest, NULL,
75968+ prefix_size = handler->list(d, buffer, buffer ? buffer_size - used : 0, NULL,
75969 name_size, handler->flags);
75970 if (prefix_size) {
75971 if (buffer) {
75972- if (prefix_size + name_size + 1 > rest) {
75973+ if (prefix_size + name_size + 1 > buffer_size - used) {
75974 err = -ERANGE;
75975 goto failed;
75976 }
75977@@ -86,7 +86,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
75978 buffer[name_size] = '\0';
75979 buffer += name_size + 1;
75980 }
75981- rest -= prefix_size + name_size + 1;
75982+ used += prefix_size + name_size + 1;
75983 } else {
75984 /* no handler or insuffficient privileges, so skip */
75985 err = squashfs_read_metadata(sb, NULL, &start,
75986@@ -107,7 +107,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
75987 if (err < 0)
75988 goto failed;
75989 }
75990- err = buffer_size - rest;
75991+ err = used;
75992
75993 failed:
75994 return err;
75995diff --git a/fs/stat.c b/fs/stat.c
75996index ae0c3ce..9ee641c 100644
75997--- a/fs/stat.c
75998+++ b/fs/stat.c
75999@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
76000 stat->gid = inode->i_gid;
76001 stat->rdev = inode->i_rdev;
76002 stat->size = i_size_read(inode);
76003- stat->atime = inode->i_atime;
76004- stat->mtime = inode->i_mtime;
76005+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
76006+ stat->atime = inode->i_ctime;
76007+ stat->mtime = inode->i_ctime;
76008+ } else {
76009+ stat->atime = inode->i_atime;
76010+ stat->mtime = inode->i_mtime;
76011+ }
76012 stat->ctime = inode->i_ctime;
76013 stat->blksize = (1 << inode->i_blkbits);
76014 stat->blocks = inode->i_blocks;
76015@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
76016 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
76017 {
76018 struct inode *inode = path->dentry->d_inode;
76019+ int retval;
76020
76021- if (inode->i_op->getattr)
76022- return inode->i_op->getattr(path->mnt, path->dentry, stat);
76023+ if (inode->i_op->getattr) {
76024+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
76025+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
76026+ stat->atime = stat->ctime;
76027+ stat->mtime = stat->ctime;
76028+ }
76029+ return retval;
76030+ }
76031
76032 generic_fillattr(inode, stat);
76033 return 0;
76034diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
76035index 0b45ff4..edf9d3a 100644
76036--- a/fs/sysfs/dir.c
76037+++ b/fs/sysfs/dir.c
76038@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
76039 kfree(buf);
76040 }
76041
76042+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
76043+extern int grsec_enable_sysfs_restrict;
76044+#endif
76045+
76046 /**
76047 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
76048 * @kobj: object we're creating directory for
76049@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
76050 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
76051 {
76052 struct kernfs_node *parent, *kn;
76053+ const char *name;
76054+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
76055+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
76056+ const char *parent_name;
76057+#endif
76058
76059 BUG_ON(!kobj);
76060
76061+ name = kobject_name(kobj);
76062+
76063 if (kobj->parent)
76064 parent = kobj->parent->sd;
76065 else
76066@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
76067 if (!parent)
76068 return -ENOENT;
76069
76070- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
76071- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
76072+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
76073+ parent_name = parent->name;
76074+ mode = S_IRWXU;
76075+
76076+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
76077+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
76078+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
76079+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
76080+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
76081+ if (!grsec_enable_sysfs_restrict)
76082+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
76083+#endif
76084+
76085+ kn = kernfs_create_dir_ns(parent, name,
76086+ mode, kobj, ns);
76087 if (IS_ERR(kn)) {
76088 if (PTR_ERR(kn) == -EEXIST)
76089- sysfs_warn_dup(parent, kobject_name(kobj));
76090+ sysfs_warn_dup(parent, name);
76091 return PTR_ERR(kn);
76092 }
76093
76094diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
76095index 69d4889..a810bd4 100644
76096--- a/fs/sysv/sysv.h
76097+++ b/fs/sysv/sysv.h
76098@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
76099 #endif
76100 }
76101
76102-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
76103+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
76104 {
76105 if (sbi->s_bytesex == BYTESEX_PDP)
76106 return PDP_swab((__force __u32)n);
76107diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
76108index fb08b0c..65fcc7e 100644
76109--- a/fs/ubifs/io.c
76110+++ b/fs/ubifs/io.c
76111@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
76112 return err;
76113 }
76114
76115-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
76116+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
76117 {
76118 int err;
76119
76120diff --git a/fs/udf/misc.c b/fs/udf/misc.c
76121index c175b4d..8f36a16 100644
76122--- a/fs/udf/misc.c
76123+++ b/fs/udf/misc.c
76124@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
76125
76126 u8 udf_tag_checksum(const struct tag *t)
76127 {
76128- u8 *data = (u8 *)t;
76129+ const u8 *data = (const u8 *)t;
76130 u8 checksum = 0;
76131 int i;
76132 for (i = 0; i < sizeof(struct tag); ++i)
76133diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
76134index 8d974c4..b82f6ec 100644
76135--- a/fs/ufs/swab.h
76136+++ b/fs/ufs/swab.h
76137@@ -22,7 +22,7 @@ enum {
76138 BYTESEX_BE
76139 };
76140
76141-static inline u64
76142+static inline u64 __intentional_overflow(-1)
76143 fs64_to_cpu(struct super_block *sbp, __fs64 n)
76144 {
76145 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
76146@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
76147 return (__force __fs64)cpu_to_be64(n);
76148 }
76149
76150-static inline u32
76151+static inline u32 __intentional_overflow(-1)
76152 fs32_to_cpu(struct super_block *sbp, __fs32 n)
76153 {
76154 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
76155diff --git a/fs/utimes.c b/fs/utimes.c
76156index aa138d6..5f3a811 100644
76157--- a/fs/utimes.c
76158+++ b/fs/utimes.c
76159@@ -1,6 +1,7 @@
76160 #include <linux/compiler.h>
76161 #include <linux/file.h>
76162 #include <linux/fs.h>
76163+#include <linux/security.h>
76164 #include <linux/linkage.h>
76165 #include <linux/mount.h>
76166 #include <linux/namei.h>
76167@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
76168 }
76169 }
76170 retry_deleg:
76171+
76172+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
76173+ error = -EACCES;
76174+ goto mnt_drop_write_and_out;
76175+ }
76176+
76177 mutex_lock(&inode->i_mutex);
76178 error = notify_change(path->dentry, &newattrs, &delegated_inode);
76179 mutex_unlock(&inode->i_mutex);
76180diff --git a/fs/xattr.c b/fs/xattr.c
76181index 4ef6985..a6cd6567 100644
76182--- a/fs/xattr.c
76183+++ b/fs/xattr.c
76184@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
76185 return rc;
76186 }
76187
76188+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
76189+ssize_t
76190+pax_getxattr(struct dentry *dentry, void *value, size_t size)
76191+{
76192+ struct inode *inode = dentry->d_inode;
76193+ ssize_t error;
76194+
76195+ error = inode_permission(inode, MAY_EXEC);
76196+ if (error)
76197+ return error;
76198+
76199+ if (inode->i_op->getxattr)
76200+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
76201+ else
76202+ error = -EOPNOTSUPP;
76203+
76204+ return error;
76205+}
76206+EXPORT_SYMBOL(pax_getxattr);
76207+#endif
76208+
76209 ssize_t
76210 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
76211 {
76212@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
76213 * Extended attribute SET operations
76214 */
76215 static long
76216-setxattr(struct dentry *d, const char __user *name, const void __user *value,
76217+setxattr(struct path *path, const char __user *name, const void __user *value,
76218 size_t size, int flags)
76219 {
76220 int error;
76221@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
76222 posix_acl_fix_xattr_from_user(kvalue, size);
76223 }
76224
76225- error = vfs_setxattr(d, kname, kvalue, size, flags);
76226+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
76227+ error = -EACCES;
76228+ goto out;
76229+ }
76230+
76231+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
76232 out:
76233 if (vvalue)
76234 vfree(vvalue);
76235@@ -376,7 +402,7 @@ retry:
76236 return error;
76237 error = mnt_want_write(path.mnt);
76238 if (!error) {
76239- error = setxattr(path.dentry, name, value, size, flags);
76240+ error = setxattr(&path, name, value, size, flags);
76241 mnt_drop_write(path.mnt);
76242 }
76243 path_put(&path);
76244@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
76245 audit_file(f.file);
76246 error = mnt_want_write_file(f.file);
76247 if (!error) {
76248- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
76249+ error = setxattr(&f.file->f_path, name, value, size, flags);
76250 mnt_drop_write_file(f.file);
76251 }
76252 fdput(f);
76253@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
76254 * Extended attribute REMOVE operations
76255 */
76256 static long
76257-removexattr(struct dentry *d, const char __user *name)
76258+removexattr(struct path *path, const char __user *name)
76259 {
76260 int error;
76261 char kname[XATTR_NAME_MAX + 1];
76262@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
76263 if (error < 0)
76264 return error;
76265
76266- return vfs_removexattr(d, kname);
76267+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
76268+ return -EACCES;
76269+
76270+ return vfs_removexattr(path->dentry, kname);
76271 }
76272
76273 static int path_removexattr(const char __user *pathname,
76274@@ -623,7 +652,7 @@ retry:
76275 return error;
76276 error = mnt_want_write(path.mnt);
76277 if (!error) {
76278- error = removexattr(path.dentry, name);
76279+ error = removexattr(&path, name);
76280 mnt_drop_write(path.mnt);
76281 }
76282 path_put(&path);
76283@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
76284 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
76285 {
76286 struct fd f = fdget(fd);
76287+ struct path *path;
76288 int error = -EBADF;
76289
76290 if (!f.file)
76291 return error;
76292+ path = &f.file->f_path;
76293 audit_file(f.file);
76294 error = mnt_want_write_file(f.file);
76295 if (!error) {
76296- error = removexattr(f.file->f_path.dentry, name);
76297+ error = removexattr(path, name);
76298 mnt_drop_write_file(f.file);
76299 }
76300 fdput(f);
76301diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
76302index 61ec015..7c18807 100644
76303--- a/fs/xfs/libxfs/xfs_bmap.c
76304+++ b/fs/xfs/libxfs/xfs_bmap.c
76305@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
76306
76307 #else
76308 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
76309-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
76310+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
76311 #endif /* DEBUG */
76312
76313 /*
76314diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
76315index 098cd78..724d3f8 100644
76316--- a/fs/xfs/xfs_dir2_readdir.c
76317+++ b/fs/xfs/xfs_dir2_readdir.c
76318@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
76319 ino = dp->d_ops->sf_get_ino(sfp, sfep);
76320 filetype = dp->d_ops->sf_get_ftype(sfep);
76321 ctx->pos = off & 0x7fffffff;
76322- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
76323+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
76324+ char name[sfep->namelen];
76325+ memcpy(name, sfep->name, sfep->namelen);
76326+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
76327+ return 0;
76328+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
76329 xfs_dir3_get_dtype(dp->i_mount, filetype)))
76330 return 0;
76331 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
76332diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
76333index ac4feae..386d551 100644
76334--- a/fs/xfs/xfs_ioctl.c
76335+++ b/fs/xfs/xfs_ioctl.c
76336@@ -120,7 +120,7 @@ xfs_find_handle(
76337 }
76338
76339 error = -EFAULT;
76340- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
76341+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
76342 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
76343 goto out_put;
76344
76345diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
76346index c31d2c2..6ec8f62 100644
76347--- a/fs/xfs/xfs_linux.h
76348+++ b/fs/xfs/xfs_linux.h
76349@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
76350 * of the compiler which do not like us using do_div in the middle
76351 * of large functions.
76352 */
76353-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
76354+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
76355 {
76356 __u32 mod;
76357
76358@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
76359 return 0;
76360 }
76361 #else
76362-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
76363+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
76364 {
76365 __u32 mod;
76366
76367diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
76368new file mode 100644
76369index 0000000..31f8fe4
76370--- /dev/null
76371+++ b/grsecurity/Kconfig
76372@@ -0,0 +1,1182 @@
76373+#
76374+# grecurity configuration
76375+#
76376+menu "Memory Protections"
76377+depends on GRKERNSEC
76378+
76379+config GRKERNSEC_KMEM
76380+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
76381+ default y if GRKERNSEC_CONFIG_AUTO
76382+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
76383+ help
76384+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
76385+ be written to or read from to modify or leak the contents of the running
76386+ kernel. /dev/port will also not be allowed to be opened, writing to
76387+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
76388+ If you have module support disabled, enabling this will close up several
76389+ ways that are currently used to insert malicious code into the running
76390+ kernel.
76391+
76392+ Even with this feature enabled, we still highly recommend that
76393+ you use the RBAC system, as it is still possible for an attacker to
76394+ modify the running kernel through other more obscure methods.
76395+
76396+ It is highly recommended that you say Y here if you meet all the
76397+ conditions above.
76398+
76399+config GRKERNSEC_VM86
76400+ bool "Restrict VM86 mode"
76401+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
76402+ depends on X86_32
76403+
76404+ help
76405+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
76406+ make use of a special execution mode on 32bit x86 processors called
76407+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
76408+ video cards and will still work with this option enabled. The purpose
76409+ of the option is to prevent exploitation of emulation errors in
76410+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
76411+ Nearly all users should be able to enable this option.
76412+
76413+config GRKERNSEC_IO
76414+ bool "Disable privileged I/O"
76415+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
76416+ depends on X86
76417+ select RTC_CLASS
76418+ select RTC_INTF_DEV
76419+ select RTC_DRV_CMOS
76420+
76421+ help
76422+ If you say Y here, all ioperm and iopl calls will return an error.
76423+ Ioperm and iopl can be used to modify the running kernel.
76424+ Unfortunately, some programs need this access to operate properly,
76425+ the most notable of which are XFree86 and hwclock. hwclock can be
76426+ remedied by having RTC support in the kernel, so real-time
76427+ clock support is enabled if this option is enabled, to ensure
76428+ that hwclock operates correctly. If hwclock still does not work,
76429+ either update udev or symlink /dev/rtc to /dev/rtc0.
76430+
76431+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
76432+ you may not be able to boot into a graphical environment with this
76433+ option enabled. In this case, you should use the RBAC system instead.
76434+
76435+config GRKERNSEC_BPF_HARDEN
76436+ bool "Harden BPF interpreter"
76437+ default y if GRKERNSEC_CONFIG_AUTO
76438+ help
76439+ Unlike previous versions of grsecurity that hardened both the BPF
76440+ interpreted code against corruption at rest as well as the JIT code
76441+ against JIT-spray attacks and attacker-controlled immediate values
76442+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
76443+ and will ensure the interpreted code is read-only at rest. This feature
76444+ may be removed at a later time when eBPF stabilizes to entirely revert
76445+ back to the more secure pre-3.16 BPF interpreter/JIT.
76446+
76447+ If you're using KERNEXEC, it's recommended that you enable this option
76448+ to supplement the hardening of the kernel.
76449+
76450+config GRKERNSEC_PERF_HARDEN
76451+ bool "Disable unprivileged PERF_EVENTS usage by default"
76452+ default y if GRKERNSEC_CONFIG_AUTO
76453+ depends on PERF_EVENTS
76454+ help
76455+ If you say Y here, the range of acceptable values for the
76456+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
76457+ default to a new value: 3. When the sysctl is set to this value, no
76458+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
76459+
76460+ Though PERF_EVENTS can be used legitimately for performance monitoring
76461+ and low-level application profiling, it is forced on regardless of
76462+ configuration, has been at fault for several vulnerabilities, and
76463+ creates new opportunities for side channels and other information leaks.
76464+
76465+ This feature puts PERF_EVENTS into a secure default state and permits
76466+ the administrator to change out of it temporarily if unprivileged
76467+ application profiling is needed.
76468+
76469+config GRKERNSEC_RAND_THREADSTACK
76470+ bool "Insert random gaps between thread stacks"
76471+ default y if GRKERNSEC_CONFIG_AUTO
76472+ depends on PAX_RANDMMAP && !PPC
76473+ help
76474+ If you say Y here, a random-sized gap will be enforced between allocated
76475+ thread stacks. Glibc's NPTL and other threading libraries that
76476+ pass MAP_STACK to the kernel for thread stack allocation are supported.
76477+ The implementation currently provides 8 bits of entropy for the gap.
76478+
76479+ Many distributions do not compile threaded remote services with the
76480+ -fstack-check argument to GCC, causing the variable-sized stack-based
76481+ allocator, alloca(), to not probe the stack on allocation. This
76482+ permits an unbounded alloca() to skip over any guard page and potentially
76483+ modify another thread's stack reliably. An enforced random gap
76484+ reduces the reliability of such an attack and increases the chance
76485+ that such a read/write to another thread's stack instead lands in
76486+ an unmapped area, causing a crash and triggering grsecurity's
76487+ anti-bruteforcing logic.
76488+
76489+config GRKERNSEC_PROC_MEMMAP
76490+ bool "Harden ASLR against information leaks and entropy reduction"
76491+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
76492+ depends on PAX_NOEXEC || PAX_ASLR
76493+ help
76494+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
76495+ give no information about the addresses of its mappings if
76496+ PaX features that rely on random addresses are enabled on the task.
76497+ In addition to sanitizing this information and disabling other
76498+ dangerous sources of information, this option causes reads of sensitive
76499+ /proc/<pid> entries where the file descriptor was opened in a different
76500+ task than the one performing the read. Such attempts are logged.
76501+ This option also limits argv/env strings for suid/sgid binaries
76502+ to 512KB to prevent a complete exhaustion of the stack entropy provided
76503+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
76504+ binaries to prevent alternative mmap layouts from being abused.
76505+
76506+ If you use PaX it is essential that you say Y here as it closes up
76507+ several holes that make full ASLR useless locally.
76508+
76509+
76510+config GRKERNSEC_KSTACKOVERFLOW
76511+ bool "Prevent kernel stack overflows"
76512+ default y if GRKERNSEC_CONFIG_AUTO
76513+ depends on !IA64 && 64BIT
76514+ help
76515+ If you say Y here, the kernel's process stacks will be allocated
76516+ with vmalloc instead of the kernel's default allocator. This
76517+ introduces guard pages that in combination with the alloca checking
76518+ of the STACKLEAK feature prevents all forms of kernel process stack
76519+ overflow abuse. Note that this is different from kernel stack
76520+ buffer overflows.
76521+
76522+config GRKERNSEC_BRUTE
76523+ bool "Deter exploit bruteforcing"
76524+ default y if GRKERNSEC_CONFIG_AUTO
76525+ help
76526+ If you say Y here, attempts to bruteforce exploits against forking
76527+ daemons such as apache or sshd, as well as against suid/sgid binaries
76528+ will be deterred. When a child of a forking daemon is killed by PaX
76529+ or crashes due to an illegal instruction or other suspicious signal,
76530+ the parent process will be delayed 30 seconds upon every subsequent
76531+ fork until the administrator is able to assess the situation and
76532+ restart the daemon.
76533+ In the suid/sgid case, the attempt is logged, the user has all their
76534+ existing instances of the suid/sgid binary terminated and will
76535+ be unable to execute any suid/sgid binaries for 15 minutes.
76536+
76537+ It is recommended that you also enable signal logging in the auditing
76538+ section so that logs are generated when a process triggers a suspicious
76539+ signal.
76540+ If the sysctl option is enabled, a sysctl option with name
76541+ "deter_bruteforce" is created.
76542+
76543+config GRKERNSEC_MODHARDEN
76544+ bool "Harden module auto-loading"
76545+ default y if GRKERNSEC_CONFIG_AUTO
76546+ depends on MODULES
76547+ help
76548+ If you say Y here, module auto-loading in response to use of some
76549+ feature implemented by an unloaded module will be restricted to
76550+ root users. Enabling this option helps defend against attacks
76551+ by unprivileged users who abuse the auto-loading behavior to
76552+ cause a vulnerable module to load that is then exploited.
76553+
76554+ If this option prevents a legitimate use of auto-loading for a
76555+ non-root user, the administrator can execute modprobe manually
76556+ with the exact name of the module mentioned in the alert log.
76557+ Alternatively, the administrator can add the module to the list
76558+ of modules loaded at boot by modifying init scripts.
76559+
76560+ Modification of init scripts will most likely be needed on
76561+ Ubuntu servers with encrypted home directory support enabled,
76562+ as the first non-root user logging in will cause the ecb(aes),
76563+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
76564+
76565+config GRKERNSEC_HIDESYM
76566+ bool "Hide kernel symbols"
76567+ default y if GRKERNSEC_CONFIG_AUTO
76568+ select PAX_USERCOPY_SLABS
76569+ help
76570+ If you say Y here, getting information on loaded modules, and
76571+ displaying all kernel symbols through a syscall will be restricted
76572+ to users with CAP_SYS_MODULE. For software compatibility reasons,
76573+ /proc/kallsyms will be restricted to the root user. The RBAC
76574+ system can hide that entry even from root.
76575+
76576+ This option also prevents leaking of kernel addresses through
76577+ several /proc entries.
76578+
76579+ Note that this option is only effective provided the following
76580+ conditions are met:
76581+ 1) The kernel using grsecurity is not precompiled by some distribution
76582+ 2) You have also enabled GRKERNSEC_DMESG
76583+ 3) You are using the RBAC system and hiding other files such as your
76584+ kernel image and System.map. Alternatively, enabling this option
76585+ causes the permissions on /boot, /lib/modules, and the kernel
76586+ source directory to change at compile time to prevent
76587+ reading by non-root users.
76588+ If the above conditions are met, this option will aid in providing a
76589+ useful protection against local kernel exploitation of overflows
76590+ and arbitrary read/write vulnerabilities.
76591+
76592+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
76593+ in addition to this feature.
76594+
76595+config GRKERNSEC_RANDSTRUCT
76596+ bool "Randomize layout of sensitive kernel structures"
76597+ default y if GRKERNSEC_CONFIG_AUTO
76598+ select GRKERNSEC_HIDESYM
76599+ select MODVERSIONS if MODULES
76600+ help
76601+ If you say Y here, the layouts of a number of sensitive kernel
76602+ structures (task, fs, cred, etc) and all structures composed entirely
76603+ of function pointers (aka "ops" structs) will be randomized at compile-time.
76604+ This can introduce the requirement of an additional infoleak
76605+ vulnerability for exploits targeting these structure types.
76606+
76607+ Enabling this feature will introduce some performance impact, slightly
76608+ increase memory usage, and prevent the use of forensic tools like
76609+ Volatility against the system (unless the kernel source tree isn't
76610+ cleaned after kernel installation).
76611+
76612+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
76613+ It remains after a make clean to allow for external modules to be compiled
76614+ with the existing seed and will be removed by a make mrproper or
76615+ make distclean.
76616+
 76617+	  Note that the implementation requires gcc 4.6.4 or newer. You may need
76618+ to install the supporting headers explicitly in addition to the normal
76619+ gcc package.
76620+
76621+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
76622+ bool "Use cacheline-aware structure randomization"
76623+ depends on GRKERNSEC_RANDSTRUCT
76624+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
76625+ help
76626+ If you say Y here, the RANDSTRUCT randomization will make a best effort
76627+ at restricting randomization to cacheline-sized groups of elements. It
76628+ will further not randomize bitfields in structures. This reduces the
76629+ performance hit of RANDSTRUCT at the cost of weakened randomization.
76630+
76631+config GRKERNSEC_KERN_LOCKOUT
76632+ bool "Active kernel exploit response"
76633+ default y if GRKERNSEC_CONFIG_AUTO
76634+ depends on X86 || ARM || PPC || SPARC
76635+ help
76636+ If you say Y here, when a PaX alert is triggered due to suspicious
76637+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
76638+ or an OOPS occurs due to bad memory accesses, instead of just
76639+ terminating the offending process (and potentially allowing
76640+ a subsequent exploit from the same user), we will take one of two
76641+ actions:
76642+ If the user was root, we will panic the system
76643+ If the user was non-root, we will log the attempt, terminate
76644+ all processes owned by the user, then prevent them from creating
76645+ any new processes until the system is restarted
76646+ This deters repeated kernel exploitation/bruteforcing attempts
76647+ and is useful for later forensics.
76648+
76649+config GRKERNSEC_OLD_ARM_USERLAND
76650+ bool "Old ARM userland compatibility"
76651+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
76652+ help
76653+ If you say Y here, stubs of executable code to perform such operations
76654+ as "compare-exchange" will be placed at fixed locations in the ARM vector
76655+ table. This is unfortunately needed for old ARM userland meant to run
76656+ across a wide range of processors. Without this option enabled,
76657+ the get_tls and data memory barrier stubs will be emulated by the kernel,
76658+ which is enough for Linaro userlands or other userlands designed for v6
76659+ and newer ARM CPUs. It's recommended that you try without this option enabled
76660+ first, and only enable it if your userland does not boot (it will likely fail
76661+ at init time).
76662+
76663+endmenu
76664+menu "Role Based Access Control Options"
76665+depends on GRKERNSEC
76666+
76667+config GRKERNSEC_RBAC_DEBUG
76668+ bool
76669+
76670+config GRKERNSEC_NO_RBAC
76671+ bool "Disable RBAC system"
76672+ help
76673+ If you say Y here, the /dev/grsec device will be removed from the kernel,
76674+ preventing the RBAC system from being enabled. You should only say Y
76675+ here if you have no intention of using the RBAC system, so as to prevent
76676+ an attacker with root access from misusing the RBAC system to hide files
76677+ and processes when loadable module support and /dev/[k]mem have been
76678+ locked down.
76679+
76680+config GRKERNSEC_ACL_HIDEKERN
76681+ bool "Hide kernel processes"
76682+ help
76683+ If you say Y here, all kernel threads will be hidden to all
76684+ processes but those whose subject has the "view hidden processes"
76685+ flag.
76686+
76687+config GRKERNSEC_ACL_MAXTRIES
76688+ int "Maximum tries before password lockout"
76689+ default 3
76690+ help
76691+ This option enforces the maximum number of times a user can attempt
76692+ to authorize themselves with the grsecurity RBAC system before being
76693+ denied the ability to attempt authorization again for a specified time.
76694+ The lower the number, the harder it will be to brute-force a password.
76695+
76696+config GRKERNSEC_ACL_TIMEOUT
76697+ int "Time to wait after max password tries, in seconds"
76698+ default 30
76699+ help
76700+ This option specifies the time the user must wait after attempting to
76701+ authorize to the RBAC system with the maximum number of invalid
76702+ passwords. The higher the number, the harder it will be to brute-force
76703+ a password.
76704+
76705+endmenu
76706+menu "Filesystem Protections"
76707+depends on GRKERNSEC
76708+
76709+config GRKERNSEC_PROC
76710+ bool "Proc restrictions"
76711+ default y if GRKERNSEC_CONFIG_AUTO
76712+ help
76713+ If you say Y here, the permissions of the /proc filesystem
76714+ will be altered to enhance system security and privacy. You MUST
76715+ choose either a user only restriction or a user and group restriction.
76716+ Depending upon the option you choose, you can either restrict users to
76717+ see only the processes they themselves run, or choose a group that can
76718+ view all processes and files normally restricted to root if you choose
76719+ the "restrict to user only" option. NOTE: If you're running identd or
76720+ ntpd as a non-root user, you will have to run it as the group you
76721+ specify here.
76722+
76723+config GRKERNSEC_PROC_USER
76724+ bool "Restrict /proc to user only"
76725+ depends on GRKERNSEC_PROC
76726+ help
76727+ If you say Y here, non-root users will only be able to view their own
76728+ processes, and restricts them from viewing network-related information,
76729+ and viewing kernel symbol and module information.
76730+
76731+config GRKERNSEC_PROC_USERGROUP
76732+ bool "Allow special group"
76733+ default y if GRKERNSEC_CONFIG_AUTO
76734+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
76735+ help
76736+ If you say Y here, you will be able to select a group that will be
76737+ able to view all processes and network-related information. If you've
76738+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
76739+ remain hidden. This option is useful if you want to run identd as
76740+ a non-root user. The group you select may also be chosen at boot time
76741+ via "grsec_proc_gid=" on the kernel commandline.
76742+
76743+config GRKERNSEC_PROC_GID
76744+ int "GID for special group"
76745+ depends on GRKERNSEC_PROC_USERGROUP
76746+ default 1001
76747+
76748+config GRKERNSEC_PROC_ADD
76749+ bool "Additional restrictions"
76750+ default y if GRKERNSEC_CONFIG_AUTO
76751+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
76752+ help
76753+ If you say Y here, additional restrictions will be placed on
76754+ /proc that keep normal users from viewing device information and
76755+ slabinfo information that could be useful for exploits.
76756+
76757+config GRKERNSEC_LINK
76758+ bool "Linking restrictions"
76759+ default y if GRKERNSEC_CONFIG_AUTO
76760+ help
76761+ If you say Y here, /tmp race exploits will be prevented, since users
76762+ will no longer be able to follow symlinks owned by other users in
76763+ world-writable +t directories (e.g. /tmp), unless the owner of the
 76764+	  symlink is the owner of the directory. Users will also not be
76765+ able to hardlink to files they do not own. If the sysctl option is
76766+ enabled, a sysctl option with name "linking_restrictions" is created.
76767+
76768+config GRKERNSEC_SYMLINKOWN
76769+ bool "Kernel-enforced SymlinksIfOwnerMatch"
76770+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
76771+ help
76772+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
76773+ that prevents it from being used as a security feature. As Apache
76774+ verifies the symlink by performing a stat() against the target of
76775+ the symlink before it is followed, an attacker can setup a symlink
76776+ to point to a same-owned file, then replace the symlink with one
76777+ that targets another user's file just after Apache "validates" the
76778+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
76779+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
76780+ will be in place for the group you specify. If the sysctl option
76781+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
76782+ created.
76783+
76784+config GRKERNSEC_SYMLINKOWN_GID
76785+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
76786+ depends on GRKERNSEC_SYMLINKOWN
76787+ default 1006
76788+ help
76789+ Setting this GID determines what group kernel-enforced
76790+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
76791+ is enabled, a sysctl option with name "symlinkown_gid" is created.
76792+
76793+config GRKERNSEC_FIFO
76794+ bool "FIFO restrictions"
76795+ default y if GRKERNSEC_CONFIG_AUTO
76796+ help
76797+ If you say Y here, users will not be able to write to FIFOs they don't
76798+ own in world-writable +t directories (e.g. /tmp), unless the owner of
 76799+	  the FIFO is the same as the owner of the directory it's held in. If the sysctl
76800+ option is enabled, a sysctl option with name "fifo_restrictions" is
76801+ created.
76802+
76803+config GRKERNSEC_SYSFS_RESTRICT
76804+ bool "Sysfs/debugfs restriction"
76805+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
76806+ depends on SYSFS
76807+ help
76808+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
76809+ any filesystem normally mounted under it (e.g. debugfs) will be
76810+ mostly accessible only by root. These filesystems generally provide access
76811+ to hardware and debug information that isn't appropriate for unprivileged
76812+ users of the system. Sysfs and debugfs have also become a large source
76813+ of new vulnerabilities, ranging from infoleaks to local compromise.
76814+ There has been very little oversight with an eye toward security involved
76815+ in adding new exporters of information to these filesystems, so their
76816+ use is discouraged.
76817+ For reasons of compatibility, a few directories have been whitelisted
76818+ for access by non-root users:
76819+ /sys/fs/selinux
76820+ /sys/fs/fuse
76821+ /sys/devices/system/cpu
76822+
76823+config GRKERNSEC_ROFS
76824+ bool "Runtime read-only mount protection"
76825+ depends on SYSCTL
76826+ help
76827+ If you say Y here, a sysctl option with name "romount_protect" will
76828+ be created. By setting this option to 1 at runtime, filesystems
76829+ will be protected in the following ways:
76830+ * No new writable mounts will be allowed
76831+ * Existing read-only mounts won't be able to be remounted read/write
76832+ * Write operations will be denied on all block devices
76833+ This option acts independently of grsec_lock: once it is set to 1,
76834+ it cannot be turned off. Therefore, please be mindful of the resulting
76835+ behavior if this option is enabled in an init script on a read-only
76836+ filesystem.
76837+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
76838+ and GRKERNSEC_IO should be enabled and module loading disabled via
76839+ config or at runtime.
76840+ This feature is mainly intended for secure embedded systems.
76841+
76842+
76843+config GRKERNSEC_DEVICE_SIDECHANNEL
76844+ bool "Eliminate stat/notify-based device sidechannels"
76845+ default y if GRKERNSEC_CONFIG_AUTO
76846+ help
76847+ If you say Y here, timing analyses on block or character
76848+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
76849+ will be thwarted for unprivileged users. If a process without
76850+ CAP_MKNOD stats such a device, the last access and last modify times
76851+ will match the device's create time. No access or modify events
76852+ will be triggered through inotify/dnotify/fanotify for such devices.
76853+ This feature will prevent attacks that may at a minimum
76854+ allow an attacker to determine the administrator's password length.
76855+
76856+config GRKERNSEC_CHROOT
76857+ bool "Chroot jail restrictions"
76858+ default y if GRKERNSEC_CONFIG_AUTO
76859+ help
76860+ If you say Y here, you will be able to choose several options that will
76861+ make breaking out of a chrooted jail much more difficult. If you
76862+ encounter no software incompatibilities with the following options, it
76863+ is recommended that you enable each one.
76864+
76865+ Note that the chroot restrictions are not intended to apply to "chroots"
76866+ to directories that are simple bind mounts of the global root filesystem.
76867+ For several other reasons, a user shouldn't expect any significant
76868+ security by performing such a chroot.
76869+
76870+config GRKERNSEC_CHROOT_MOUNT
76871+ bool "Deny mounts"
76872+ default y if GRKERNSEC_CONFIG_AUTO
76873+ depends on GRKERNSEC_CHROOT
76874+ help
76875+ If you say Y here, processes inside a chroot will not be able to
76876+ mount or remount filesystems. If the sysctl option is enabled, a
76877+ sysctl option with name "chroot_deny_mount" is created.
76878+
76879+config GRKERNSEC_CHROOT_DOUBLE
76880+ bool "Deny double-chroots"
76881+ default y if GRKERNSEC_CONFIG_AUTO
76882+ depends on GRKERNSEC_CHROOT
76883+ help
76884+ If you say Y here, processes inside a chroot will not be able to chroot
76885+ again outside the chroot. This is a widely used method of breaking
76886+ out of a chroot jail and should not be allowed. If the sysctl
76887+ option is enabled, a sysctl option with name
76888+ "chroot_deny_chroot" is created.
76889+
76890+config GRKERNSEC_CHROOT_PIVOT
76891+ bool "Deny pivot_root in chroot"
76892+ default y if GRKERNSEC_CONFIG_AUTO
76893+ depends on GRKERNSEC_CHROOT
76894+ help
76895+ If you say Y here, processes inside a chroot will not be able to use
76896+ a function called pivot_root() that was introduced in Linux 2.3.41. It
76897+ works similar to chroot in that it changes the root filesystem. This
76898+ function could be misused in a chrooted process to attempt to break out
76899+ of the chroot, and therefore should not be allowed. If the sysctl
76900+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
76901+ created.
76902+
76903+config GRKERNSEC_CHROOT_CHDIR
76904+ bool "Enforce chdir(\"/\") on all chroots"
76905+ default y if GRKERNSEC_CONFIG_AUTO
76906+ depends on GRKERNSEC_CHROOT
76907+ help
76908+ If you say Y here, the current working directory of all newly-chrooted
 76909+	  applications will be set to the root directory of the chroot.
76910+ The man page on chroot(2) states:
76911+ Note that this call does not change the current working
76912+ directory, so that `.' can be outside the tree rooted at
76913+ `/'. In particular, the super-user can escape from a
76914+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
76915+
76916+ It is recommended that you say Y here, since it's not known to break
76917+ any software. If the sysctl option is enabled, a sysctl option with
76918+ name "chroot_enforce_chdir" is created.
76919+
76920+config GRKERNSEC_CHROOT_CHMOD
76921+ bool "Deny (f)chmod +s"
76922+ default y if GRKERNSEC_CONFIG_AUTO
76923+ depends on GRKERNSEC_CHROOT
76924+ help
76925+ If you say Y here, processes inside a chroot will not be able to chmod
76926+ or fchmod files to make them have suid or sgid bits. This protects
76927+ against another published method of breaking a chroot. If the sysctl
76928+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
76929+ created.
76930+
76931+config GRKERNSEC_CHROOT_FCHDIR
76932+ bool "Deny fchdir and fhandle out of chroot"
76933+ default y if GRKERNSEC_CONFIG_AUTO
76934+ depends on GRKERNSEC_CHROOT
76935+ help
76936+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
76937+ to a file descriptor of the chrooting process that points to a directory
76938+ outside the filesystem will be stopped. Additionally, this option prevents
76939+ use of the recently-created syscall for opening files by a guessable "file
76940+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
76941+ with name "chroot_deny_fchdir" is created.
76942+
76943+config GRKERNSEC_CHROOT_MKNOD
76944+ bool "Deny mknod"
76945+ default y if GRKERNSEC_CONFIG_AUTO
76946+ depends on GRKERNSEC_CHROOT
76947+ help
76948+ If you say Y here, processes inside a chroot will not be allowed to
76949+ mknod. The problem with using mknod inside a chroot is that it
76950+ would allow an attacker to create a device entry that is the same
76951+ as one on the physical root of your system, which could range from
76952+ anything from the console device to a device for your harddrive (which
76953+ they could then use to wipe the drive or steal data). It is recommended
76954+ that you say Y here, unless you run into software incompatibilities.
76955+ If the sysctl option is enabled, a sysctl option with name
76956+ "chroot_deny_mknod" is created.
76957+
76958+config GRKERNSEC_CHROOT_SHMAT
76959+ bool "Deny shmat() out of chroot"
76960+ default y if GRKERNSEC_CONFIG_AUTO
76961+ depends on GRKERNSEC_CHROOT
76962+ help
76963+ If you say Y here, processes inside a chroot will not be able to attach
76964+ to shared memory segments that were created outside of the chroot jail.
76965+ It is recommended that you say Y here. If the sysctl option is enabled,
76966+ a sysctl option with name "chroot_deny_shmat" is created.
76967+
76968+config GRKERNSEC_CHROOT_UNIX
76969+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
76970+ default y if GRKERNSEC_CONFIG_AUTO
76971+ depends on GRKERNSEC_CHROOT
76972+ help
76973+ If you say Y here, processes inside a chroot will not be able to
76974+ connect to abstract (meaning not belonging to a filesystem) Unix
76975+ domain sockets that were bound outside of a chroot. It is recommended
76976+ that you say Y here. If the sysctl option is enabled, a sysctl option
76977+ with name "chroot_deny_unix" is created.
76978+
76979+config GRKERNSEC_CHROOT_FINDTASK
76980+ bool "Protect outside processes"
76981+ default y if GRKERNSEC_CONFIG_AUTO
76982+ depends on GRKERNSEC_CHROOT
76983+ help
76984+ If you say Y here, processes inside a chroot will not be able to
76985+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
76986+ getsid, or view any process outside of the chroot. If the sysctl
76987+ option is enabled, a sysctl option with name "chroot_findtask" is
76988+ created.
76989+
76990+config GRKERNSEC_CHROOT_NICE
76991+ bool "Restrict priority changes"
76992+ default y if GRKERNSEC_CONFIG_AUTO
76993+ depends on GRKERNSEC_CHROOT
76994+ help
76995+ If you say Y here, processes inside a chroot will not be able to raise
76996+ the priority of processes in the chroot, or alter the priority of
76997+ processes outside the chroot. This provides more security than simply
76998+ removing CAP_SYS_NICE from the process' capability set. If the
76999+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
77000+ is created.
77001+
77002+config GRKERNSEC_CHROOT_SYSCTL
77003+ bool "Deny sysctl writes"
77004+ default y if GRKERNSEC_CONFIG_AUTO
77005+ depends on GRKERNSEC_CHROOT
77006+ help
77007+ If you say Y here, an attacker in a chroot will not be able to
77008+ write to sysctl entries, either by sysctl(2) or through a /proc
77009+ interface. It is strongly recommended that you say Y here. If the
77010+ sysctl option is enabled, a sysctl option with name
77011+ "chroot_deny_sysctl" is created.
77012+
77013+config GRKERNSEC_CHROOT_RENAME
77014+ bool "Deny bad renames"
77015+ default y if GRKERNSEC_CONFIG_AUTO
77016+ depends on GRKERNSEC_CHROOT
77017+ help
77018+ If you say Y here, an attacker in a chroot will not be able to
77019+ abuse the ability to create double chroots to break out of the
77020+ chroot by exploiting a race condition between a rename of a directory
77021+ within a chroot against an open of a symlink with relative path
77022+ components. This feature will likewise prevent an accomplice outside
77023+ a chroot from enabling a user inside the chroot to break out and make
77024+ use of their credentials on the global filesystem. Enabling this
77025+ feature is essential to prevent root users from breaking out of a
77026+ chroot. If the sysctl option is enabled, a sysctl option with name
77027+ "chroot_deny_bad_rename" is created.
77028+
77029+config GRKERNSEC_CHROOT_CAPS
77030+ bool "Capability restrictions"
77031+ default y if GRKERNSEC_CONFIG_AUTO
77032+ depends on GRKERNSEC_CHROOT
77033+ help
77034+ If you say Y here, the capabilities on all processes within a
77035+ chroot jail will be lowered to stop module insertion, raw i/o,
77036+ system and net admin tasks, rebooting the system, modifying immutable
77037+ files, modifying IPC owned by another, and changing the system time.
77038+ This is left an option because it can break some apps. Disable this
77039+ if your chrooted apps are having problems performing those kinds of
77040+ tasks. If the sysctl option is enabled, a sysctl option with
77041+ name "chroot_caps" is created.
77042+
77043+config GRKERNSEC_CHROOT_INITRD
77044+ bool "Exempt initrd tasks from restrictions"
77045+ default y if GRKERNSEC_CONFIG_AUTO
77046+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
77047+ help
77048+ If you say Y here, tasks started prior to init will be exempted from
77049+ grsecurity's chroot restrictions. This option is mainly meant to
77050+ resolve Plymouth's performing privileged operations unnecessarily
77051+ in a chroot.
77052+
77053+endmenu
77054+menu "Kernel Auditing"
77055+depends on GRKERNSEC
77056+
77057+config GRKERNSEC_AUDIT_GROUP
77058+ bool "Single group for auditing"
77059+ help
77060+ If you say Y here, the exec and chdir logging features will only operate
77061+ on a group you specify. This option is recommended if you only want to
77062+ watch certain users instead of having a large amount of logs from the
77063+ entire system. If the sysctl option is enabled, a sysctl option with
77064+ name "audit_group" is created.
77065+
77066+config GRKERNSEC_AUDIT_GID
77067+ int "GID for auditing"
77068+ depends on GRKERNSEC_AUDIT_GROUP
77069+ default 1007
77070+
77071+config GRKERNSEC_EXECLOG
77072+ bool "Exec logging"
77073+ help
77074+ If you say Y here, all execve() calls will be logged (since the
77075+ other exec*() calls are frontends to execve(), all execution
77076+ will be logged). Useful for shell-servers that like to keep track
77077+ of their users. If the sysctl option is enabled, a sysctl option with
77078+ name "exec_logging" is created.
77079+ WARNING: This option when enabled will produce a LOT of logs, especially
77080+ on an active system.
77081+
77082+config GRKERNSEC_RESLOG
77083+ bool "Resource logging"
77084+ default y if GRKERNSEC_CONFIG_AUTO
77085+ help
77086+ If you say Y here, all attempts to overstep resource limits will
77087+ be logged with the resource name, the requested size, and the current
77088+ limit. It is highly recommended that you say Y here. If the sysctl
77089+ option is enabled, a sysctl option with name "resource_logging" is
77090+ created. If the RBAC system is enabled, the sysctl value is ignored.
77091+
77092+config GRKERNSEC_CHROOT_EXECLOG
77093+ bool "Log execs within chroot"
77094+ help
77095+ If you say Y here, all executions inside a chroot jail will be logged
77096+ to syslog. This can cause a large amount of logs if certain
77097+ applications (eg. djb's daemontools) are installed on the system, and
77098+ is therefore left as an option. If the sysctl option is enabled, a
77099+ sysctl option with name "chroot_execlog" is created.
77100+
77101+config GRKERNSEC_AUDIT_PTRACE
77102+ bool "Ptrace logging"
77103+ help
77104+ If you say Y here, all attempts to attach to a process via ptrace
77105+ will be logged. If the sysctl option is enabled, a sysctl option
77106+ with name "audit_ptrace" is created.
77107+
77108+config GRKERNSEC_AUDIT_CHDIR
77109+ bool "Chdir logging"
77110+ help
77111+ If you say Y here, all chdir() calls will be logged. If the sysctl
77112+ option is enabled, a sysctl option with name "audit_chdir" is created.
77113+
77114+config GRKERNSEC_AUDIT_MOUNT
77115+ bool "(Un)Mount logging"
77116+ help
77117+ If you say Y here, all mounts and unmounts will be logged. If the
77118+ sysctl option is enabled, a sysctl option with name "audit_mount" is
77119+ created.
77120+
77121+config GRKERNSEC_SIGNAL
77122+ bool "Signal logging"
77123+ default y if GRKERNSEC_CONFIG_AUTO
77124+ help
77125+ If you say Y here, certain important signals will be logged, such as
 77126+	  SIGSEGV, which will as a result inform you of when an error in a program
77127+ occurred, which in some cases could mean a possible exploit attempt.
77128+ If the sysctl option is enabled, a sysctl option with name
77129+ "signal_logging" is created.
77130+
77131+config GRKERNSEC_FORKFAIL
77132+ bool "Fork failure logging"
77133+ help
77134+ If you say Y here, all failed fork() attempts will be logged.
77135+ This could suggest a fork bomb, or someone attempting to overstep
77136+ their process limit. If the sysctl option is enabled, a sysctl option
77137+ with name "forkfail_logging" is created.
77138+
77139+config GRKERNSEC_TIME
77140+ bool "Time change logging"
77141+ default y if GRKERNSEC_CONFIG_AUTO
77142+ help
77143+ If you say Y here, any changes of the system clock will be logged.
77144+ If the sysctl option is enabled, a sysctl option with name
77145+ "timechange_logging" is created.
77146+
77147+config GRKERNSEC_PROC_IPADDR
77148+ bool "/proc/<pid>/ipaddr support"
77149+ default y if GRKERNSEC_CONFIG_AUTO
77150+ help
77151+ If you say Y here, a new entry will be added to each /proc/<pid>
77152+ directory that contains the IP address of the person using the task.
77153+ The IP is carried across local TCP and AF_UNIX stream sockets.
77154+ This information can be useful for IDS/IPSes to perform remote response
77155+ to a local attack. The entry is readable by only the owner of the
77156+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
77157+ the RBAC system), and thus does not create privacy concerns.
77158+
77159+config GRKERNSEC_RWXMAP_LOG
77160+ bool 'Denied RWX mmap/mprotect logging'
77161+ default y if GRKERNSEC_CONFIG_AUTO
77162+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
77163+ help
77164+ If you say Y here, calls to mmap() and mprotect() with explicit
77165+ usage of PROT_WRITE and PROT_EXEC together will be logged when
77166+ denied by the PAX_MPROTECT feature. This feature will also
77167+ log other problematic scenarios that can occur when PAX_MPROTECT
77168+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
77169+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
77170+ is created.
77171+
77172+endmenu
77173+
77174+menu "Executable Protections"
77175+depends on GRKERNSEC
77176+
77177+config GRKERNSEC_DMESG
77178+ bool "Dmesg(8) restriction"
77179+ default y if GRKERNSEC_CONFIG_AUTO
77180+ help
77181+ If you say Y here, non-root users will not be able to use dmesg(8)
77182+ to view the contents of the kernel's circular log buffer.
77183+ The kernel's log buffer often contains kernel addresses and other
77184+ identifying information useful to an attacker in fingerprinting a
77185+ system for a targeted exploit.
77186+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
77187+ created.
77188+
77189+config GRKERNSEC_HARDEN_PTRACE
77190+ bool "Deter ptrace-based process snooping"
77191+ default y if GRKERNSEC_CONFIG_AUTO
77192+ help
77193+ If you say Y here, TTY sniffers and other malicious monitoring
77194+ programs implemented through ptrace will be defeated. If you
77195+ have been using the RBAC system, this option has already been
77196+ enabled for several years for all users, with the ability to make
77197+ fine-grained exceptions.
77198+
77199+ This option only affects the ability of non-root users to ptrace
77200+ processes that are not a descendent of the ptracing process.
77201+ This means that strace ./binary and gdb ./binary will still work,
77202+ but attaching to arbitrary processes will not. If the sysctl
77203+ option is enabled, a sysctl option with name "harden_ptrace" is
77204+ created.
77205+
77206+config GRKERNSEC_PTRACE_READEXEC
77207+ bool "Require read access to ptrace sensitive binaries"
77208+ default y if GRKERNSEC_CONFIG_AUTO
77209+ help
77210+ If you say Y here, unprivileged users will not be able to ptrace unreadable
77211+ binaries. This option is useful in environments that
77212+ remove the read bits (e.g. file mode 4711) from suid binaries to
77213+ prevent infoleaking of their contents. This option adds
77214+ consistency to the use of that file mode, as the binary could normally
77215+ be read out when run without privileges while ptracing.
77216+
77217+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
77218+ is created.
77219+
77220+config GRKERNSEC_SETXID
77221+ bool "Enforce consistent multithreaded privileges"
77222+ default y if GRKERNSEC_CONFIG_AUTO
77223+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
77224+ help
77225+ If you say Y here, a change from a root uid to a non-root uid
77226+ in a multithreaded application will cause the resulting uids,
77227+ gids, supplementary groups, and capabilities in that thread
77228+ to be propagated to the other threads of the process. In most
77229+ cases this is unnecessary, as glibc will emulate this behavior
77230+ on behalf of the application. Other libcs do not act in the
77231+ same way, allowing the other threads of the process to continue
77232+ running with root privileges. If the sysctl option is enabled,
77233+ a sysctl option with name "consistent_setxid" is created.
77234+
77235+config GRKERNSEC_HARDEN_IPC
77236+ bool "Disallow access to overly-permissive IPC objects"
77237+ default y if GRKERNSEC_CONFIG_AUTO
77238+ depends on SYSVIPC
77239+ help
77240+ If you say Y here, access to overly-permissive IPC objects (shared
77241+ memory, message queues, and semaphores) will be denied for processes
77242+ given the following criteria beyond normal permission checks:
77243+ 1) If the IPC object is world-accessible and the euid doesn't match
77244+ that of the creator or current uid for the IPC object
77245+ 2) If the IPC object is group-accessible and the egid doesn't
77246+ match that of the creator or current gid for the IPC object
77247+ It's a common error to grant too much permission to these objects,
77248+ with impact ranging from denial of service and information leaking to
77249+ privilege escalation. This feature was developed in response to
77250+ research by Tim Brown:
77251+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
77252+ who found hundreds of such insecure usages. Processes with
77253+ CAP_IPC_OWNER are still permitted to access these IPC objects.
77254+ If the sysctl option is enabled, a sysctl option with name
77255+ "harden_ipc" is created.
77256+
77257+config GRKERNSEC_TPE
77258+ bool "Trusted Path Execution (TPE)"
77259+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
77260+ help
77261+ If you say Y here, you will be able to choose a gid to add to the
77262+ supplementary groups of users you want to mark as "untrusted."
77263+ These users will not be able to execute any files that are not in
77264+ root-owned directories writable only by root. If the sysctl option
77265+ is enabled, a sysctl option with name "tpe" is created.
77266+
77267+config GRKERNSEC_TPE_ALL
77268+ bool "Partially restrict all non-root users"
77269+ depends on GRKERNSEC_TPE
77270+ help
77271+ If you say Y here, all non-root users will be covered under
77272+ a weaker TPE restriction. This is separate from, and in addition to,
77273+ the main TPE options that you have selected elsewhere. Thus, if a
77274+ "trusted" GID is chosen, this restriction applies to even that GID.
77275+ Under this restriction, all non-root users will only be allowed to
77276+ execute files in directories they own that are not group or
77277+ world-writable, or in directories owned by root and writable only by
77278+ root. If the sysctl option is enabled, a sysctl option with name
77279+ "tpe_restrict_all" is created.
77280+
77281+config GRKERNSEC_TPE_INVERT
77282+ bool "Invert GID option"
77283+ depends on GRKERNSEC_TPE
77284+ help
77285+ If you say Y here, the group you specify in the TPE configuration will
77286+ decide what group TPE restrictions will be *disabled* for. This
77287+ option is useful if you want TPE restrictions to be applied to most
77288+ users on the system. If the sysctl option is enabled, a sysctl option
77289+ with name "tpe_invert" is created. Unlike other sysctl options, this
77290+ entry will default to on for backward-compatibility.
77291+
77292+config GRKERNSEC_TPE_GID
77293+ int
77294+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
77295+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
77296+
77297+config GRKERNSEC_TPE_UNTRUSTED_GID
77298+ int "GID for TPE-untrusted users"
77299+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
77300+ default 1005
77301+ help
77302+ Setting this GID determines what group TPE restrictions will be
77303+ *enabled* for. If the sysctl option is enabled, a sysctl option
77304+ with name "tpe_gid" is created.
77305+
77306+config GRKERNSEC_TPE_TRUSTED_GID
77307+ int "GID for TPE-trusted users"
77308+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
77309+ default 1005
77310+ help
77311+ Setting this GID determines what group TPE restrictions will be
77312+ *disabled* for. If the sysctl option is enabled, a sysctl option
77313+ with name "tpe_gid" is created.
77314+
77315+endmenu
77316+menu "Network Protections"
77317+depends on GRKERNSEC
77318+
77319+config GRKERNSEC_BLACKHOLE
77320+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
77321+ default y if GRKERNSEC_CONFIG_AUTO
77322+ depends on NET
77323+ help
77324+ If you say Y here, neither TCP resets nor ICMP
77325+ destination-unreachable packets will be sent in response to packets
77326+ sent to ports for which no associated listening process exists.
77327+ It will also prevent the sending of ICMP protocol unreachable packets
77328+ in response to packets with unknown protocols.
77329+ This feature supports both IPV4 and IPV6 and exempts the
77330+ loopback interface from blackholing. Enabling this feature
77331+ makes a host more resilient to DoS attacks and reduces network
77332+ visibility against scanners.
77333+
77334+ The blackhole feature as-implemented is equivalent to the FreeBSD
77335+ blackhole feature, as it prevents RST responses to all packets, not
77336+ just SYNs. Under most application behavior this causes no
77337+ problems, but applications (like haproxy) may not close certain
77338+ connections in a way that cleanly terminates them on the remote
77339+ end, leaving the remote host in LAST_ACK state. Because of this
77340+ side-effect and to prevent intentional LAST_ACK DoSes, this
77341+ feature also adds automatic mitigation against such attacks.
77342+ The mitigation drastically reduces the amount of time a socket
77343+ can spend in LAST_ACK state. If you're using haproxy and not
77344+ all servers it connects to have this option enabled, consider
77345+ disabling this feature on the haproxy host.
77346+
77347+ If the sysctl option is enabled, two sysctl options with names
77348+ "ip_blackhole" and "lastack_retries" will be created.
77349+ While "ip_blackhole" takes the standard zero/non-zero on/off
77350+ toggle, "lastack_retries" uses the same kinds of values as
77351+ "tcp_retries1" and "tcp_retries2". The default value of 4
77352+ prevents a socket from lasting more than 45 seconds in LAST_ACK
77353+ state.
77354+
77355+config GRKERNSEC_NO_SIMULT_CONNECT
77356+ bool "Disable TCP Simultaneous Connect"
77357+ default y if GRKERNSEC_CONFIG_AUTO
77358+ depends on NET
77359+ help
77360+ If you say Y here, a feature by Willy Tarreau will be enabled that
77361+ removes a weakness in Linux's strict implementation of TCP that
77362+ allows two clients to connect to each other without either entering
77363+ a listening state. The weakness allows an attacker to easily prevent
77364+ a client from connecting to a known server provided the source port
77365+ for the connection is guessed correctly.
77366+
77367+ As the weakness could be used to prevent an antivirus or IPS from
77368+ fetching updates, or prevent an SSL gateway from fetching a CRL,
77369+ it should be eliminated by enabling this option. Though Linux is
77370+ one of few operating systems supporting simultaneous connect, it
77371+ has no legitimate use in practice and is rarely supported by firewalls.
77372+
77373+config GRKERNSEC_SOCKET
77374+ bool "Socket restrictions"
77375+ depends on NET
77376+ help
77377+ If you say Y here, you will be able to choose from several options.
77378+ If you assign a GID on your system and add it to the supplementary
77379+ groups of users you want to restrict socket access to, this patch
77380+ will perform up to three things, based on the option(s) you choose.
77381+
77382+config GRKERNSEC_SOCKET_ALL
77383+ bool "Deny any sockets to group"
77384+ depends on GRKERNSEC_SOCKET
77385+ help
77386+ If you say Y here, you will be able to choose a GID of whose users will
77387+ be unable to connect to other hosts from your machine or run server
77388+ applications from your machine. If the sysctl option is enabled, a
77389+ sysctl option with name "socket_all" is created.
77390+
77391+config GRKERNSEC_SOCKET_ALL_GID
77392+ int "GID to deny all sockets for"
77393+ depends on GRKERNSEC_SOCKET_ALL
77394+ default 1004
77395+ help
77396+ Here you can choose the GID to disable socket access for. Remember to
77397+ add the users you want socket access disabled for to the GID
77398+ specified here. If the sysctl option is enabled, a sysctl option
77399+ with name "socket_all_gid" is created.
77400+
77401+config GRKERNSEC_SOCKET_CLIENT
77402+ bool "Deny client sockets to group"
77403+ depends on GRKERNSEC_SOCKET
77404+ help
77405+ If you say Y here, you will be able to choose a GID of whose users will
77406+ be unable to connect to other hosts from your machine, but will be
77407+ able to run servers. If this option is enabled, all users in the group
77408+ you specify will have to use passive mode when initiating ftp transfers
77409+ from the shell on your machine. If the sysctl option is enabled, a
77410+ sysctl option with name "socket_client" is created.
77411+
77412+config GRKERNSEC_SOCKET_CLIENT_GID
77413+ int "GID to deny client sockets for"
77414+ depends on GRKERNSEC_SOCKET_CLIENT
77415+ default 1003
77416+ help
77417+ Here you can choose the GID to disable client socket access for.
77418+ Remember to add the users you want client socket access disabled for to
77419+ the GID specified here. If the sysctl option is enabled, a sysctl
77420+ option with name "socket_client_gid" is created.
77421+
77422+config GRKERNSEC_SOCKET_SERVER
77423+ bool "Deny server sockets to group"
77424+ depends on GRKERNSEC_SOCKET
77425+ help
77426+ If you say Y here, you will be able to choose a GID of whose users will
77427+ be unable to run server applications from your machine. If the sysctl
77428+ option is enabled, a sysctl option with name "socket_server" is created.
77429+
77430+config GRKERNSEC_SOCKET_SERVER_GID
77431+ int "GID to deny server sockets for"
77432+ depends on GRKERNSEC_SOCKET_SERVER
77433+ default 1002
77434+ help
77435+ Here you can choose the GID to disable server socket access for.
77436+ Remember to add the users you want server socket access disabled for to
77437+ the GID specified here. If the sysctl option is enabled, a sysctl
77438+ option with name "socket_server_gid" is created.
77439+
77440+endmenu
77441+
77442+menu "Physical Protections"
77443+depends on GRKERNSEC
77444+
77445+config GRKERNSEC_DENYUSB
77446+ bool "Deny new USB connections after toggle"
77447+ default y if GRKERNSEC_CONFIG_AUTO
77448+ depends on SYSCTL && USB_SUPPORT
77449+ help
77450+ If you say Y here, a new sysctl option with name "deny_new_usb"
77451+ will be created. Setting its value to 1 will prevent any new
77452+ USB devices from being recognized by the OS. Any attempted USB
77453+ device insertion will be logged. This option is intended to be
77454+ used against custom USB devices designed to exploit vulnerabilities
77455+ in various USB device drivers.
77456+
77457+ For greatest effectiveness, this sysctl should be set after any
77458+ relevant init scripts. This option is safe to enable in distros
77459+ as each user can choose whether or not to toggle the sysctl.
77460+
77461+config GRKERNSEC_DENYUSB_FORCE
77462+ bool "Reject all USB devices not connected at boot"
77463+ select USB
77464+ depends on GRKERNSEC_DENYUSB
77465+ help
77466+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
77467+ that doesn't involve a sysctl entry. This option should only be
77468+ enabled if you're sure you want to deny all new USB connections
77469+ at runtime and don't want to modify init scripts. This should not
77470+ be enabled by distros. It forces the core USB code to be built
77471+ into the kernel image so that all devices connected at boot time
77472+ can be recognized and new USB device connections can be prevented
77473+ prior to init running.
77474+
77475+endmenu
77476+
77477+menu "Sysctl Support"
77478+depends on GRKERNSEC && SYSCTL
77479+
77480+config GRKERNSEC_SYSCTL
77481+ bool "Sysctl support"
77482+ default y if GRKERNSEC_CONFIG_AUTO
77483+ help
77484+ If you say Y here, you will be able to change the options that
77485+ grsecurity runs with at bootup, without having to recompile your
77486+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
77487+ to enable (1) or disable (0) various features. All the sysctl entries
77488+ are mutable until the "grsec_lock" entry is set to a non-zero value.
77489+ All features enabled in the kernel configuration are disabled at boot
77490+ if you do not say Y to the "Turn on features by default" option.
77491+ All options should be set at startup, and the grsec_lock entry should
77492+ be set to a non-zero value after all the options are set.
77493+ *THIS IS EXTREMELY IMPORTANT*
77494+
77495+config GRKERNSEC_SYSCTL_DISTRO
77496+ bool "Extra sysctl support for distro makers (READ HELP)"
77497+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
77498+ help
77499+ If you say Y here, additional sysctl options will be created
77500+ for features that affect processes running as root. Therefore,
77501+ it is critical when using this option that the grsec_lock entry be
77502+ enabled after boot. Only distros with prebuilt kernel packages
77503+ with this option enabled that can ensure grsec_lock is enabled
77504+ after boot should use this option.
77505+ *Failure to set grsec_lock after boot makes all grsec features
77506+ this option covers useless*
77507+
77508+ Currently this option creates the following sysctl entries:
77509+ "Disable Privileged I/O": "disable_priv_io"
77510+
77511+config GRKERNSEC_SYSCTL_ON
77512+ bool "Turn on features by default"
77513+ default y if GRKERNSEC_CONFIG_AUTO
77514+ depends on GRKERNSEC_SYSCTL
77515+ help
77516+ If you say Y here, instead of having all features enabled in the
77517+ kernel configuration disabled at boot time, the features will be
77518+ enabled at boot time. It is recommended you say Y here unless
77519+ there is some reason you would want all sysctl-tunable features to
77520+ be disabled by default. As mentioned elsewhere, it is important
77521+ to enable the grsec_lock entry once you have finished modifying
77522+ the sysctl entries.
77523+
77524+endmenu
77525+menu "Logging Options"
77526+depends on GRKERNSEC
77527+
77528+config GRKERNSEC_FLOODTIME
77529+ int "Seconds in between log messages (minimum)"
77530+ default 10
77531+ help
77532+ This option allows you to enforce the number of seconds between
77533+ grsecurity log messages. The default should be suitable for most
77534+ people, however, if you choose to change it, choose a value small enough
77535+ to allow informative logs to be produced, but large enough to
77536+ prevent flooding.
77537+
77538+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
77539+ any rate limiting on grsecurity log messages.
77540+
77541+config GRKERNSEC_FLOODBURST
77542+ int "Number of messages in a burst (maximum)"
77543+ default 6
77544+ help
77545+ This option allows you to choose the maximum number of messages allowed
77546+ within the flood time interval you chose in a separate option. The
77547+ default should be suitable for most people, however if you find that
77548+ many of your logs are being interpreted as flooding, you may want to
77549+ raise this value.
77550+
77551+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
77552+ any rate limiting on grsecurity log messages.
77553+
77554+endmenu
77555diff --git a/grsecurity/Makefile b/grsecurity/Makefile
77556new file mode 100644
77557index 0000000..30ababb
77558--- /dev/null
77559+++ b/grsecurity/Makefile
77560@@ -0,0 +1,54 @@
77561+# grsecurity – access control and security hardening for Linux
77562+# All code in this directory and various hooks located throughout the Linux kernel are
77563+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
77564+# http://www.grsecurity.net spender@grsecurity.net
77565+#
77566+# This program is free software; you can redistribute it and/or
77567+# modify it under the terms of the GNU General Public License version 2
77568+# as published by the Free Software Foundation.
77569+#
77570+# This program is distributed in the hope that it will be useful,
77571+# but WITHOUT ANY WARRANTY; without even the implied warranty of
77572+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
77573+# GNU General Public License for more details.
77574+#
77575+# You should have received a copy of the GNU General Public License
77576+# along with this program; if not, write to the Free Software
77577+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
77578+
77579+KBUILD_CFLAGS += -Werror
77580+
77581+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
77582+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
77583+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
77584+ grsec_usb.o grsec_ipc.o grsec_proc.o
77585+
77586+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
77587+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
77588+ gracl_learn.o grsec_log.o gracl_policy.o
77589+ifdef CONFIG_COMPAT
77590+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
77591+endif
77592+
77593+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
77594+
77595+ifdef CONFIG_NET
77596+obj-y += grsec_sock.o
77597+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
77598+endif
77599+
77600+ifndef CONFIG_GRKERNSEC
77601+obj-y += grsec_disabled.o
77602+endif
77603+
77604+ifdef CONFIG_GRKERNSEC_HIDESYM
77605+extra-y := grsec_hidesym.o
77606+$(obj)/grsec_hidesym.o:
77607+ @-chmod -f 500 /boot
77608+ @-chmod -f 500 /lib/modules
77609+ @-chmod -f 500 /lib64/modules
77610+ @-chmod -f 500 /lib32/modules
77611+ @-chmod -f 700 .
77612+ @-chmod -f 700 $(objtree)
77613+ @echo ' grsec: protected kernel image paths'
77614+endif
77615diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
77616new file mode 100644
77617index 0000000..811af1f
77618--- /dev/null
77619+++ b/grsecurity/gracl.c
77620@@ -0,0 +1,2749 @@
77621+#include <linux/kernel.h>
77622+#include <linux/module.h>
77623+#include <linux/sched.h>
77624+#include <linux/mm.h>
77625+#include <linux/file.h>
77626+#include <linux/fs.h>
77627+#include <linux/namei.h>
77628+#include <linux/mount.h>
77629+#include <linux/tty.h>
77630+#include <linux/proc_fs.h>
77631+#include <linux/lglock.h>
77632+#include <linux/slab.h>
77633+#include <linux/vmalloc.h>
77634+#include <linux/types.h>
77635+#include <linux/sysctl.h>
77636+#include <linux/netdevice.h>
77637+#include <linux/ptrace.h>
77638+#include <linux/gracl.h>
77639+#include <linux/gralloc.h>
77640+#include <linux/security.h>
77641+#include <linux/grinternal.h>
77642+#include <linux/pid_namespace.h>
77643+#include <linux/stop_machine.h>
77644+#include <linux/fdtable.h>
77645+#include <linux/percpu.h>
77646+#include <linux/lglock.h>
77647+#include <linux/hugetlb.h>
77648+#include <linux/posix-timers.h>
77649+#include <linux/prefetch.h>
77650+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77651+#include <linux/magic.h>
77652+#include <linux/pagemap.h>
77653+#include "../fs/btrfs/async-thread.h"
77654+#include "../fs/btrfs/ctree.h"
77655+#include "../fs/btrfs/btrfs_inode.h"
77656+#endif
77657+#include "../fs/mount.h"
77658+
77659+#include <asm/uaccess.h>
77660+#include <asm/errno.h>
77661+#include <asm/mman.h>
77662+
77663+#define FOR_EACH_ROLE_START(role) \
77664+ role = running_polstate.role_list; \
77665+ while (role) {
77666+
77667+#define FOR_EACH_ROLE_END(role) \
77668+ role = role->prev; \
77669+ }
77670+
77671+extern struct path gr_real_root;
77672+
77673+static struct gr_policy_state running_polstate;
77674+struct gr_policy_state *polstate = &running_polstate;
77675+extern struct gr_alloc_state *current_alloc_state;
77676+
77677+extern char *gr_shared_page[4];
77678+DEFINE_RWLOCK(gr_inode_lock);
77679+
77680+static unsigned int gr_status __read_only = GR_STATUS_INIT;
77681+
77682+#ifdef CONFIG_NET
77683+extern struct vfsmount *sock_mnt;
77684+#endif
77685+
77686+extern struct vfsmount *pipe_mnt;
77687+extern struct vfsmount *shm_mnt;
77688+
77689+#ifdef CONFIG_HUGETLBFS
77690+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
77691+#endif
77692+
77693+extern u16 acl_sp_role_value;
77694+extern struct acl_object_label *fakefs_obj_rw;
77695+extern struct acl_object_label *fakefs_obj_rwx;
77696+
77697+int gr_acl_is_enabled(void)
77698+{
77699+ return (gr_status & GR_READY);
77700+}
77701+
77702+void gr_enable_rbac_system(void)
77703+{
77704+ pax_open_kernel();
77705+ gr_status |= GR_READY;
77706+ pax_close_kernel();
77707+}
77708+
77709+int gr_rbac_disable(void *unused)
77710+{
77711+ pax_open_kernel();
77712+ gr_status &= ~GR_READY;
77713+ pax_close_kernel();
77714+
77715+ return 0;
77716+}
77717+
77718+static inline dev_t __get_dev(const struct dentry *dentry)
77719+{
77720+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77721+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77722+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
77723+ else
77724+#endif
77725+ return dentry->d_sb->s_dev;
77726+}
77727+
77728+static inline u64 __get_ino(const struct dentry *dentry)
77729+{
77730+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77731+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77732+ return btrfs_ino(dentry->d_inode);
77733+ else
77734+#endif
77735+ return dentry->d_inode->i_ino;
77736+}
77737+
77738+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
77739+{
77740+ return __get_dev(dentry);
77741+}
77742+
77743+u64 gr_get_ino_from_dentry(struct dentry *dentry)
77744+{
77745+ return __get_ino(dentry);
77746+}
77747+
77748+static char gr_task_roletype_to_char(struct task_struct *task)
77749+{
77750+ switch (task->role->roletype &
77751+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
77752+ GR_ROLE_SPECIAL)) {
77753+ case GR_ROLE_DEFAULT:
77754+ return 'D';
77755+ case GR_ROLE_USER:
77756+ return 'U';
77757+ case GR_ROLE_GROUP:
77758+ return 'G';
77759+ case GR_ROLE_SPECIAL:
77760+ return 'S';
77761+ }
77762+
77763+ return 'X';
77764+}
77765+
77766+char gr_roletype_to_char(void)
77767+{
77768+ return gr_task_roletype_to_char(current);
77769+}
77770+
77771+int
77772+gr_acl_tpe_check(void)
77773+{
77774+ if (unlikely(!(gr_status & GR_READY)))
77775+ return 0;
77776+ if (current->role->roletype & GR_ROLE_TPE)
77777+ return 1;
77778+ else
77779+ return 0;
77780+}
77781+
77782+int
77783+gr_handle_rawio(const struct inode *inode)
77784+{
77785+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77786+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
77787+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
77788+ !capable(CAP_SYS_RAWIO))
77789+ return 1;
77790+#endif
77791+ return 0;
77792+}
77793+
77794+int
77795+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
77796+{
77797+ if (likely(lena != lenb))
77798+ return 0;
77799+
77800+ return !memcmp(a, b, lena);
77801+}
77802+
77803+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
77804+{
77805+ *buflen -= namelen;
77806+ if (*buflen < 0)
77807+ return -ENAMETOOLONG;
77808+ *buffer -= namelen;
77809+ memcpy(*buffer, str, namelen);
77810+ return 0;
77811+}
77812+
77813+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
77814+{
77815+ return prepend(buffer, buflen, name->name, name->len);
77816+}
77817+
77818+static int prepend_path(const struct path *path, struct path *root,
77819+ char **buffer, int *buflen)
77820+{
77821+ struct dentry *dentry = path->dentry;
77822+ struct vfsmount *vfsmnt = path->mnt;
77823+ struct mount *mnt = real_mount(vfsmnt);
77824+ bool slash = false;
77825+ int error = 0;
77826+
77827+ while (dentry != root->dentry || vfsmnt != root->mnt) {
77828+ struct dentry * parent;
77829+
77830+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
77831+ /* Global root? */
77832+ if (!mnt_has_parent(mnt)) {
77833+ goto out;
77834+ }
77835+ dentry = mnt->mnt_mountpoint;
77836+ mnt = mnt->mnt_parent;
77837+ vfsmnt = &mnt->mnt;
77838+ continue;
77839+ }
77840+ parent = dentry->d_parent;
77841+ prefetch(parent);
77842+ spin_lock(&dentry->d_lock);
77843+ error = prepend_name(buffer, buflen, &dentry->d_name);
77844+ spin_unlock(&dentry->d_lock);
77845+ if (!error)
77846+ error = prepend(buffer, buflen, "/", 1);
77847+ if (error)
77848+ break;
77849+
77850+ slash = true;
77851+ dentry = parent;
77852+ }
77853+
77854+out:
77855+ if (!error && !slash)
77856+ error = prepend(buffer, buflen, "/", 1);
77857+
77858+ return error;
77859+}
77860+
77861+/* this must be called with mount_lock and rename_lock held */
77862+
77863+static char *__our_d_path(const struct path *path, struct path *root,
77864+ char *buf, int buflen)
77865+{
77866+ char *res = buf + buflen;
77867+ int error;
77868+
77869+ prepend(&res, &buflen, "\0", 1);
77870+ error = prepend_path(path, root, &res, &buflen);
77871+ if (error)
77872+ return ERR_PTR(error);
77873+
77874+ return res;
77875+}
77876+
77877+static char *
77878+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
77879+{
77880+ char *retval;
77881+
77882+ retval = __our_d_path(path, root, buf, buflen);
77883+ if (unlikely(IS_ERR(retval)))
77884+ retval = strcpy(buf, "<path too long>");
77885+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
77886+ retval[1] = '\0';
77887+
77888+ return retval;
77889+}
77890+
77891+static char *
77892+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
77893+ char *buf, int buflen)
77894+{
77895+ struct path path;
77896+ char *res;
77897+
77898+ path.dentry = (struct dentry *)dentry;
77899+ path.mnt = (struct vfsmount *)vfsmnt;
77900+
77901+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
77902+ by the RBAC system */
77903+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
77904+
77905+ return res;
77906+}
77907+
77908+static char *
77909+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
77910+ char *buf, int buflen)
77911+{
77912+ char *res;
77913+ struct path path;
77914+ struct path root;
77915+ struct task_struct *reaper = init_pid_ns.child_reaper;
77916+
77917+ path.dentry = (struct dentry *)dentry;
77918+ path.mnt = (struct vfsmount *)vfsmnt;
77919+
77920+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
77921+ get_fs_root(reaper->fs, &root);
77922+
77923+ read_seqlock_excl(&mount_lock);
77924+ write_seqlock(&rename_lock);
77925+ res = gen_full_path(&path, &root, buf, buflen);
77926+ write_sequnlock(&rename_lock);
77927+ read_sequnlock_excl(&mount_lock);
77928+
77929+ path_put(&root);
77930+ return res;
77931+}
77932+
77933+char *
77934+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
77935+{
77936+ char *ret;
77937+ read_seqlock_excl(&mount_lock);
77938+ write_seqlock(&rename_lock);
77939+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
77940+ PAGE_SIZE);
77941+ write_sequnlock(&rename_lock);
77942+ read_sequnlock_excl(&mount_lock);
77943+ return ret;
77944+}
77945+
77946+static char *
77947+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
77948+{
77949+ char *ret;
77950+ char *buf;
77951+ int buflen;
77952+
77953+ read_seqlock_excl(&mount_lock);
77954+ write_seqlock(&rename_lock);
77955+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
77956+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
77957+ buflen = (int)(ret - buf);
77958+ if (buflen >= 5)
77959+ prepend(&ret, &buflen, "/proc", 5);
77960+ else
77961+ ret = strcpy(buf, "<path too long>");
77962+ write_sequnlock(&rename_lock);
77963+ read_sequnlock_excl(&mount_lock);
77964+ return ret;
77965+}
77966+
77967+char *
77968+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
77969+{
77970+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
77971+ PAGE_SIZE);
77972+}
77973+
77974+char *
77975+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
77976+{
77977+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
77978+ PAGE_SIZE);
77979+}
77980+
77981+char *
77982+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
77983+{
77984+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
77985+ PAGE_SIZE);
77986+}
77987+
77988+char *
77989+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
77990+{
77991+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
77992+ PAGE_SIZE);
77993+}
77994+
77995+char *
77996+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
77997+{
77998+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
77999+ PAGE_SIZE);
78000+}
78001+
78002+__u32
78003+to_gr_audit(const __u32 reqmode)
78004+{
78005+ /* masks off auditable permission flags, then shifts them to create
78006+ auditing flags, and adds the special case of append auditing if
78007+ we're requesting write */
78008+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
78009+}
78010+
78011+struct acl_role_label *
78012+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
78013+ const gid_t gid)
78014+{
78015+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
78016+ struct acl_role_label *match;
78017+ struct role_allowed_ip *ipp;
78018+ unsigned int x;
78019+ u32 curr_ip = task->signal->saved_ip;
78020+
78021+ match = state->acl_role_set.r_hash[index];
78022+
78023+ while (match) {
78024+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
78025+ for (x = 0; x < match->domain_child_num; x++) {
78026+ if (match->domain_children[x] == uid)
78027+ goto found;
78028+ }
78029+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
78030+ break;
78031+ match = match->next;
78032+ }
78033+found:
78034+ if (match == NULL) {
78035+ try_group:
78036+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
78037+ match = state->acl_role_set.r_hash[index];
78038+
78039+ while (match) {
78040+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
78041+ for (x = 0; x < match->domain_child_num; x++) {
78042+ if (match->domain_children[x] == gid)
78043+ goto found2;
78044+ }
78045+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
78046+ break;
78047+ match = match->next;
78048+ }
78049+found2:
78050+ if (match == NULL)
78051+ match = state->default_role;
78052+ if (match->allowed_ips == NULL)
78053+ return match;
78054+ else {
78055+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
78056+ if (likely
78057+ ((ntohl(curr_ip) & ipp->netmask) ==
78058+ (ntohl(ipp->addr) & ipp->netmask)))
78059+ return match;
78060+ }
78061+ match = state->default_role;
78062+ }
78063+ } else if (match->allowed_ips == NULL) {
78064+ return match;
78065+ } else {
78066+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
78067+ if (likely
78068+ ((ntohl(curr_ip) & ipp->netmask) ==
78069+ (ntohl(ipp->addr) & ipp->netmask)))
78070+ return match;
78071+ }
78072+ goto try_group;
78073+ }
78074+
78075+ return match;
78076+}
78077+
78078+static struct acl_role_label *
78079+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
78080+ const gid_t gid)
78081+{
78082+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
78083+}
78084+
78085+struct acl_subject_label *
78086+lookup_acl_subj_label(const u64 ino, const dev_t dev,
78087+ const struct acl_role_label *role)
78088+{
78089+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
78090+ struct acl_subject_label *match;
78091+
78092+ match = role->subj_hash[index];
78093+
78094+ while (match && (match->inode != ino || match->device != dev ||
78095+ (match->mode & GR_DELETED))) {
78096+ match = match->next;
78097+ }
78098+
78099+ if (match && !(match->mode & GR_DELETED))
78100+ return match;
78101+ else
78102+ return NULL;
78103+}
78104+
78105+struct acl_subject_label *
78106+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
78107+ const struct acl_role_label *role)
78108+{
78109+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
78110+ struct acl_subject_label *match;
78111+
78112+ match = role->subj_hash[index];
78113+
78114+ while (match && (match->inode != ino || match->device != dev ||
78115+ !(match->mode & GR_DELETED))) {
78116+ match = match->next;
78117+ }
78118+
78119+ if (match && (match->mode & GR_DELETED))
78120+ return match;
78121+ else
78122+ return NULL;
78123+}
78124+
78125+static struct acl_object_label *
78126+lookup_acl_obj_label(const u64 ino, const dev_t dev,
78127+ const struct acl_subject_label *subj)
78128+{
78129+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
78130+ struct acl_object_label *match;
78131+
78132+ match = subj->obj_hash[index];
78133+
78134+ while (match && (match->inode != ino || match->device != dev ||
78135+ (match->mode & GR_DELETED))) {
78136+ match = match->next;
78137+ }
78138+
78139+ if (match && !(match->mode & GR_DELETED))
78140+ return match;
78141+ else
78142+ return NULL;
78143+}
78144+
78145+static struct acl_object_label *
78146+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
78147+ const struct acl_subject_label *subj)
78148+{
78149+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
78150+ struct acl_object_label *match;
78151+
78152+ match = subj->obj_hash[index];
78153+
78154+ while (match && (match->inode != ino || match->device != dev ||
78155+ !(match->mode & GR_DELETED))) {
78156+ match = match->next;
78157+ }
78158+
78159+ if (match && (match->mode & GR_DELETED))
78160+ return match;
78161+
78162+ match = subj->obj_hash[index];
78163+
78164+ while (match && (match->inode != ino || match->device != dev ||
78165+ (match->mode & GR_DELETED))) {
78166+ match = match->next;
78167+ }
78168+
78169+ if (match && !(match->mode & GR_DELETED))
78170+ return match;
78171+ else
78172+ return NULL;
78173+}
78174+
78175+struct name_entry *
78176+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
78177+{
78178+ unsigned int len = strlen(name);
78179+ unsigned int key = full_name_hash(name, len);
78180+ unsigned int index = key % state->name_set.n_size;
78181+ struct name_entry *match;
78182+
78183+ match = state->name_set.n_hash[index];
78184+
78185+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
78186+ match = match->next;
78187+
78188+ return match;
78189+}
78190+
78191+static struct name_entry *
78192+lookup_name_entry(const char *name)
78193+{
78194+ return __lookup_name_entry(&running_polstate, name);
78195+}
78196+
78197+static struct name_entry *
78198+lookup_name_entry_create(const char *name)
78199+{
78200+ unsigned int len = strlen(name);
78201+ unsigned int key = full_name_hash(name, len);
78202+ unsigned int index = key % running_polstate.name_set.n_size;
78203+ struct name_entry *match;
78204+
78205+ match = running_polstate.name_set.n_hash[index];
78206+
78207+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
78208+ !match->deleted))
78209+ match = match->next;
78210+
78211+ if (match && match->deleted)
78212+ return match;
78213+
78214+ match = running_polstate.name_set.n_hash[index];
78215+
78216+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
78217+ match->deleted))
78218+ match = match->next;
78219+
78220+ if (match && !match->deleted)
78221+ return match;
78222+ else
78223+ return NULL;
78224+}
78225+
78226+static struct inodev_entry *
78227+lookup_inodev_entry(const u64 ino, const dev_t dev)
78228+{
78229+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
78230+ struct inodev_entry *match;
78231+
78232+ match = running_polstate.inodev_set.i_hash[index];
78233+
78234+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
78235+ match = match->next;
78236+
78237+ return match;
78238+}
78239+
78240+void
78241+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
78242+{
78243+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
78244+ state->inodev_set.i_size);
78245+ struct inodev_entry **curr;
78246+
78247+ entry->prev = NULL;
78248+
78249+ curr = &state->inodev_set.i_hash[index];
78250+ if (*curr != NULL)
78251+ (*curr)->prev = entry;
78252+
78253+ entry->next = *curr;
78254+ *curr = entry;
78255+
78256+ return;
78257+}
78258+
78259+static void
78260+insert_inodev_entry(struct inodev_entry *entry)
78261+{
78262+ __insert_inodev_entry(&running_polstate, entry);
78263+}
78264+
78265+void
78266+insert_acl_obj_label(struct acl_object_label *obj,
78267+ struct acl_subject_label *subj)
78268+{
78269+ unsigned int index =
78270+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
78271+ struct acl_object_label **curr;
78272+
78273+ obj->prev = NULL;
78274+
78275+ curr = &subj->obj_hash[index];
78276+ if (*curr != NULL)
78277+ (*curr)->prev = obj;
78278+
78279+ obj->next = *curr;
78280+ *curr = obj;
78281+
78282+ return;
78283+}
78284+
78285+void
78286+insert_acl_subj_label(struct acl_subject_label *obj,
78287+ struct acl_role_label *role)
78288+{
78289+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
78290+ struct acl_subject_label **curr;
78291+
78292+ obj->prev = NULL;
78293+
78294+ curr = &role->subj_hash[index];
78295+ if (*curr != NULL)
78296+ (*curr)->prev = obj;
78297+
78298+ obj->next = *curr;
78299+ *curr = obj;
78300+
78301+ return;
78302+}
78303+
78304+/* derived from glibc fnmatch() 0: match, 1: no match*/
78305+
78306+static int
78307+glob_match(const char *p, const char *n)
78308+{
78309+ char c;
78310+
78311+ while ((c = *p++) != '\0') {
78312+ switch (c) {
78313+ case '?':
78314+ if (*n == '\0')
78315+ return 1;
78316+ else if (*n == '/')
78317+ return 1;
78318+ break;
78319+ case '\\':
78320+ if (*n != c)
78321+ return 1;
78322+ break;
78323+ case '*':
78324+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
78325+ if (*n == '/')
78326+ return 1;
78327+ else if (c == '?') {
78328+ if (*n == '\0')
78329+ return 1;
78330+ else
78331+ ++n;
78332+ }
78333+ }
78334+ if (c == '\0') {
78335+ return 0;
78336+ } else {
78337+ const char *endp;
78338+
78339+ if ((endp = strchr(n, '/')) == NULL)
78340+ endp = n + strlen(n);
78341+
78342+ if (c == '[') {
78343+ for (--p; n < endp; ++n)
78344+ if (!glob_match(p, n))
78345+ return 0;
78346+ } else if (c == '/') {
78347+ while (*n != '\0' && *n != '/')
78348+ ++n;
78349+ if (*n == '/' && !glob_match(p, n + 1))
78350+ return 0;
78351+ } else {
78352+ for (--p; n < endp; ++n)
78353+ if (*n == c && !glob_match(p, n))
78354+ return 0;
78355+ }
78356+
78357+ return 1;
78358+ }
78359+ case '[':
78360+ {
78361+ int not;
78362+ char cold;
78363+
78364+ if (*n == '\0' || *n == '/')
78365+ return 1;
78366+
78367+ not = (*p == '!' || *p == '^');
78368+ if (not)
78369+ ++p;
78370+
78371+ c = *p++;
78372+ for (;;) {
78373+ unsigned char fn = (unsigned char)*n;
78374+
78375+ if (c == '\0')
78376+ return 1;
78377+ else {
78378+ if (c == fn)
78379+ goto matched;
78380+ cold = c;
78381+ c = *p++;
78382+
78383+ if (c == '-' && *p != ']') {
78384+ unsigned char cend = *p++;
78385+
78386+ if (cend == '\0')
78387+ return 1;
78388+
78389+ if (cold <= fn && fn <= cend)
78390+ goto matched;
78391+
78392+ c = *p++;
78393+ }
78394+ }
78395+
78396+ if (c == ']')
78397+ break;
78398+ }
78399+ if (!not)
78400+ return 1;
78401+ break;
78402+ matched:
78403+ while (c != ']') {
78404+ if (c == '\0')
78405+ return 1;
78406+
78407+ c = *p++;
78408+ }
78409+ if (not)
78410+ return 1;
78411+ }
78412+ break;
78413+ default:
78414+ if (c != *n)
78415+ return 1;
78416+ }
78417+
78418+ ++n;
78419+ }
78420+
78421+ if (*n == '\0')
78422+ return 0;
78423+
78424+ if (*n == '/')
78425+ return 0;
78426+
78427+ return 1;
78428+}
78429+
78430+static struct acl_object_label *
78431+chk_glob_label(struct acl_object_label *globbed,
78432+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
78433+{
78434+ struct acl_object_label *tmp;
78435+
78436+ if (*path == NULL)
78437+ *path = gr_to_filename_nolock(dentry, mnt);
78438+
78439+ tmp = globbed;
78440+
78441+ while (tmp) {
78442+ if (!glob_match(tmp->filename, *path))
78443+ return tmp;
78444+ tmp = tmp->next;
78445+ }
78446+
78447+ return NULL;
78448+}
78449+
78450+static struct acl_object_label *
78451+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
78452+ const u64 curr_ino, const dev_t curr_dev,
78453+ const struct acl_subject_label *subj, char **path, const int checkglob)
78454+{
78455+ struct acl_subject_label *tmpsubj;
78456+ struct acl_object_label *retval;
78457+ struct acl_object_label *retval2;
78458+
78459+ tmpsubj = (struct acl_subject_label *) subj;
78460+ read_lock(&gr_inode_lock);
78461+ do {
78462+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
78463+ if (retval) {
78464+ if (checkglob && retval->globbed) {
78465+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
78466+ if (retval2)
78467+ retval = retval2;
78468+ }
78469+ break;
78470+ }
78471+ } while ((tmpsubj = tmpsubj->parent_subject));
78472+ read_unlock(&gr_inode_lock);
78473+
78474+ return retval;
78475+}
78476+
78477+static struct acl_object_label *
78478+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
78479+ struct dentry *curr_dentry,
78480+ const struct acl_subject_label *subj, char **path, const int checkglob)
78481+{
78482+ int newglob = checkglob;
78483+ u64 inode;
78484+ dev_t device;
78485+
78486+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
78487+ as we don't want a / * rule to match instead of the / object
78488+ don't do this for create lookups that call this function though, since they're looking up
78489+ on the parent and thus need globbing checks on all paths
78490+ */
78491+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
78492+ newglob = GR_NO_GLOB;
78493+
78494+ spin_lock(&curr_dentry->d_lock);
78495+ inode = __get_ino(curr_dentry);
78496+ device = __get_dev(curr_dentry);
78497+ spin_unlock(&curr_dentry->d_lock);
78498+
78499+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
78500+}
78501+
78502+#ifdef CONFIG_HUGETLBFS
78503+static inline bool
78504+is_hugetlbfs_mnt(const struct vfsmount *mnt)
78505+{
78506+ int i;
78507+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
78508+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
78509+ return true;
78510+ }
78511+
78512+ return false;
78513+}
78514+#endif
78515+
78516+static struct acl_object_label *
78517+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78518+ const struct acl_subject_label *subj, char *path, const int checkglob)
78519+{
78520+ struct dentry *dentry = (struct dentry *) l_dentry;
78521+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
78522+ struct mount *real_mnt = real_mount(mnt);
78523+ struct acl_object_label *retval;
78524+ struct dentry *parent;
78525+
78526+ read_seqlock_excl(&mount_lock);
78527+ write_seqlock(&rename_lock);
78528+
78529+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
78530+#ifdef CONFIG_NET
78531+ mnt == sock_mnt ||
78532+#endif
78533+#ifdef CONFIG_HUGETLBFS
78534+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
78535+#endif
78536+ /* ignore Eric Biederman */
78537+ IS_PRIVATE(l_dentry->d_inode))) {
78538+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
78539+ goto out;
78540+ }
78541+
78542+ for (;;) {
78543+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
78544+ break;
78545+
78546+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
78547+ if (!mnt_has_parent(real_mnt))
78548+ break;
78549+
78550+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
78551+ if (retval != NULL)
78552+ goto out;
78553+
78554+ dentry = real_mnt->mnt_mountpoint;
78555+ real_mnt = real_mnt->mnt_parent;
78556+ mnt = &real_mnt->mnt;
78557+ continue;
78558+ }
78559+
78560+ parent = dentry->d_parent;
78561+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
78562+ if (retval != NULL)
78563+ goto out;
78564+
78565+ dentry = parent;
78566+ }
78567+
78568+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
78569+
78570+ /* gr_real_root is pinned so we don't have to hold a reference */
78571+ if (retval == NULL)
78572+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
78573+out:
78574+ write_sequnlock(&rename_lock);
78575+ read_sequnlock_excl(&mount_lock);
78576+
78577+ BUG_ON(retval == NULL);
78578+
78579+ return retval;
78580+}
78581+
78582+static struct acl_object_label *
78583+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78584+ const struct acl_subject_label *subj)
78585+{
78586+ char *path = NULL;
78587+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
78588+}
78589+
78590+static struct acl_object_label *
78591+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78592+ const struct acl_subject_label *subj)
78593+{
78594+ char *path = NULL;
78595+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
78596+}
78597+
78598+static struct acl_object_label *
78599+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78600+ const struct acl_subject_label *subj, char *path)
78601+{
78602+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
78603+}
78604+
78605+struct acl_subject_label *
78606+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78607+ const struct acl_role_label *role)
78608+{
78609+ struct dentry *dentry = (struct dentry *) l_dentry;
78610+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
78611+ struct mount *real_mnt = real_mount(mnt);
78612+ struct acl_subject_label *retval;
78613+ struct dentry *parent;
78614+
78615+ read_seqlock_excl(&mount_lock);
78616+ write_seqlock(&rename_lock);
78617+
78618+ for (;;) {
78619+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
78620+ break;
78621+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
78622+ if (!mnt_has_parent(real_mnt))
78623+ break;
78624+
78625+ spin_lock(&dentry->d_lock);
78626+ read_lock(&gr_inode_lock);
78627+ retval =
78628+ lookup_acl_subj_label(__get_ino(dentry),
78629+ __get_dev(dentry), role);
78630+ read_unlock(&gr_inode_lock);
78631+ spin_unlock(&dentry->d_lock);
78632+ if (retval != NULL)
78633+ goto out;
78634+
78635+ dentry = real_mnt->mnt_mountpoint;
78636+ real_mnt = real_mnt->mnt_parent;
78637+ mnt = &real_mnt->mnt;
78638+ continue;
78639+ }
78640+
78641+ spin_lock(&dentry->d_lock);
78642+ read_lock(&gr_inode_lock);
78643+ retval = lookup_acl_subj_label(__get_ino(dentry),
78644+ __get_dev(dentry), role);
78645+ read_unlock(&gr_inode_lock);
78646+ parent = dentry->d_parent;
78647+ spin_unlock(&dentry->d_lock);
78648+
78649+ if (retval != NULL)
78650+ goto out;
78651+
78652+ dentry = parent;
78653+ }
78654+
78655+ spin_lock(&dentry->d_lock);
78656+ read_lock(&gr_inode_lock);
78657+ retval = lookup_acl_subj_label(__get_ino(dentry),
78658+ __get_dev(dentry), role);
78659+ read_unlock(&gr_inode_lock);
78660+ spin_unlock(&dentry->d_lock);
78661+
78662+ if (unlikely(retval == NULL)) {
78663+ /* gr_real_root is pinned, we don't need to hold a reference */
78664+ read_lock(&gr_inode_lock);
78665+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
78666+ __get_dev(gr_real_root.dentry), role);
78667+ read_unlock(&gr_inode_lock);
78668+ }
78669+out:
78670+ write_sequnlock(&rename_lock);
78671+ read_sequnlock_excl(&mount_lock);
78672+
78673+ BUG_ON(retval == NULL);
78674+
78675+ return retval;
78676+}
78677+
78678+void
78679+assign_special_role(const char *rolename)
78680+{
78681+ struct acl_object_label *obj;
78682+ struct acl_role_label *r;
78683+ struct acl_role_label *assigned = NULL;
78684+ struct task_struct *tsk;
78685+ struct file *filp;
78686+
78687+ FOR_EACH_ROLE_START(r)
78688+ if (!strcmp(rolename, r->rolename) &&
78689+ (r->roletype & GR_ROLE_SPECIAL)) {
78690+ assigned = r;
78691+ break;
78692+ }
78693+ FOR_EACH_ROLE_END(r)
78694+
78695+ if (!assigned)
78696+ return;
78697+
78698+ read_lock(&tasklist_lock);
78699+ read_lock(&grsec_exec_file_lock);
78700+
78701+ tsk = current->real_parent;
78702+ if (tsk == NULL)
78703+ goto out_unlock;
78704+
78705+ filp = tsk->exec_file;
78706+ if (filp == NULL)
78707+ goto out_unlock;
78708+
78709+ tsk->is_writable = 0;
78710+ tsk->inherited = 0;
78711+
78712+ tsk->acl_sp_role = 1;
78713+ tsk->acl_role_id = ++acl_sp_role_value;
78714+ tsk->role = assigned;
78715+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
78716+
78717+ /* ignore additional mmap checks for processes that are writable
78718+ by the default ACL */
78719+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
78720+ if (unlikely(obj->mode & GR_WRITE))
78721+ tsk->is_writable = 1;
78722+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
78723+ if (unlikely(obj->mode & GR_WRITE))
78724+ tsk->is_writable = 1;
78725+
78726+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78727+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
78728+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
78729+#endif
78730+
78731+out_unlock:
78732+ read_unlock(&grsec_exec_file_lock);
78733+ read_unlock(&tasklist_lock);
78734+ return;
78735+}
78736+
78737+
78738+static void
78739+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
78740+{
78741+ struct task_struct *task = current;
78742+ const struct cred *cred = current_cred();
78743+
78744+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
78745+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
78746+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
78747+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
78748+
78749+ return;
78750+}
78751+
78752+static void
78753+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
78754+{
78755+ struct task_struct *task = current;
78756+ const struct cred *cred = current_cred();
78757+
78758+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
78759+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
78760+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
78761+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
78762+
78763+ return;
78764+}
78765+
78766+static void
78767+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
78768+{
78769+ struct task_struct *task = current;
78770+ const struct cred *cred = current_cred();
78771+
78772+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
78773+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
78774+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
78775+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
78776+
78777+ return;
78778+}
78779+
78780+static void
78781+gr_set_proc_res(struct task_struct *task)
78782+{
78783+ struct acl_subject_label *proc;
78784+ unsigned short i;
78785+
78786+ proc = task->acl;
78787+
78788+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
78789+ return;
78790+
78791+ for (i = 0; i < RLIM_NLIMITS; i++) {
78792+ unsigned long rlim_cur, rlim_max;
78793+
78794+ if (!(proc->resmask & (1U << i)))
78795+ continue;
78796+
78797+ rlim_cur = proc->res[i].rlim_cur;
78798+ rlim_max = proc->res[i].rlim_max;
78799+
78800+ if (i == RLIMIT_NOFILE) {
78801+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
78802+ if (rlim_cur > saved_sysctl_nr_open)
78803+ rlim_cur = saved_sysctl_nr_open;
78804+ if (rlim_max > saved_sysctl_nr_open)
78805+ rlim_max = saved_sysctl_nr_open;
78806+ }
78807+
78808+ task->signal->rlim[i].rlim_cur = rlim_cur;
78809+ task->signal->rlim[i].rlim_max = rlim_max;
78810+
78811+ if (i == RLIMIT_CPU)
78812+ update_rlimit_cpu(task, rlim_cur);
78813+ }
78814+
78815+ return;
78816+}
78817+
78818+/* both of the below must be called with
78819+ rcu_read_lock();
78820+ read_lock(&tasklist_lock);
78821+ read_lock(&grsec_exec_file_lock);
78822+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
78823+*/
78824+
78825+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
78826+{
78827+ char *tmpname;
78828+ struct acl_subject_label *tmpsubj;
78829+ struct file *filp;
78830+ struct name_entry *nmatch;
78831+
78832+ filp = task->exec_file;
78833+ if (filp == NULL)
78834+ return NULL;
78835+
78836+ /* the following is to apply the correct subject
78837+ on binaries running when the RBAC system
78838+ is enabled, when the binaries have been
78839+ replaced or deleted since their execution
78840+ -----
78841+ when the RBAC system starts, the inode/dev
78842+ from exec_file will be one the RBAC system
78843+ is unaware of. It only knows the inode/dev
78844+ of the present file on disk, or the absence
78845+ of it.
78846+ */
78847+
78848+ if (filename)
78849+ nmatch = __lookup_name_entry(state, filename);
78850+ else {
78851+ preempt_disable();
78852+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
78853+
78854+ nmatch = __lookup_name_entry(state, tmpname);
78855+ preempt_enable();
78856+ }
78857+ tmpsubj = NULL;
78858+ if (nmatch) {
78859+ if (nmatch->deleted)
78860+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
78861+ else
78862+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
78863+ }
78864+ /* this also works for the reload case -- if we don't match a potentially inherited subject
78865+ then we fall back to a normal lookup based on the binary's ino/dev
78866+ */
78867+ if (tmpsubj == NULL && fallback)
78868+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
78869+
78870+ return tmpsubj;
78871+}
78872+
78873+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
78874+{
78875+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
78876+}
78877+
78878+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
78879+{
78880+ struct acl_object_label *obj;
78881+ struct file *filp;
78882+
78883+ filp = task->exec_file;
78884+
78885+ task->acl = subj;
78886+ task->is_writable = 0;
78887+ /* ignore additional mmap checks for processes that are writable
78888+ by the default ACL */
78889+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
78890+ if (unlikely(obj->mode & GR_WRITE))
78891+ task->is_writable = 1;
78892+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
78893+ if (unlikely(obj->mode & GR_WRITE))
78894+ task->is_writable = 1;
78895+
78896+ gr_set_proc_res(task);
78897+
78898+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78899+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
78900+#endif
78901+}
78902+
78903+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
78904+{
78905+ __gr_apply_subject_to_task(&running_polstate, task, subj);
78906+}
78907+
78908+__u32
78909+gr_search_file(const struct dentry * dentry, const __u32 mode,
78910+ const struct vfsmount * mnt)
78911+{
78912+ __u32 retval = mode;
78913+ struct acl_subject_label *curracl;
78914+ struct acl_object_label *currobj;
78915+
78916+ if (unlikely(!(gr_status & GR_READY)))
78917+ return (mode & ~GR_AUDITS);
78918+
78919+ curracl = current->acl;
78920+
78921+ currobj = chk_obj_label(dentry, mnt, curracl);
78922+ retval = currobj->mode & mode;
78923+
78924+ /* if we're opening a specified transfer file for writing
78925+ (e.g. /dev/initctl), then transfer our role to init
78926+ */
78927+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
78928+ current->role->roletype & GR_ROLE_PERSIST)) {
78929+ struct task_struct *task = init_pid_ns.child_reaper;
78930+
78931+ if (task->role != current->role) {
78932+ struct acl_subject_label *subj;
78933+
78934+ task->acl_sp_role = 0;
78935+ task->acl_role_id = current->acl_role_id;
78936+ task->role = current->role;
78937+ rcu_read_lock();
78938+ read_lock(&grsec_exec_file_lock);
78939+ subj = gr_get_subject_for_task(task, NULL, 1);
78940+ gr_apply_subject_to_task(task, subj);
78941+ read_unlock(&grsec_exec_file_lock);
78942+ rcu_read_unlock();
78943+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
78944+ }
78945+ }
78946+
78947+ if (unlikely
78948+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
78949+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
78950+ __u32 new_mode = mode;
78951+
78952+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
78953+
78954+ retval = new_mode;
78955+
78956+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
78957+ new_mode |= GR_INHERIT;
78958+
78959+ if (!(mode & GR_NOLEARN))
78960+ gr_log_learn(dentry, mnt, new_mode);
78961+ }
78962+
78963+ return retval;
78964+}
78965+
78966+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
78967+ const struct dentry *parent,
78968+ const struct vfsmount *mnt)
78969+{
78970+ struct name_entry *match;
78971+ struct acl_object_label *matchpo;
78972+ struct acl_subject_label *curracl;
78973+ char *path;
78974+
78975+ if (unlikely(!(gr_status & GR_READY)))
78976+ return NULL;
78977+
78978+ preempt_disable();
78979+ path = gr_to_filename_rbac(new_dentry, mnt);
78980+ match = lookup_name_entry_create(path);
78981+
78982+ curracl = current->acl;
78983+
78984+ if (match) {
78985+ read_lock(&gr_inode_lock);
78986+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
78987+ read_unlock(&gr_inode_lock);
78988+
78989+ if (matchpo) {
78990+ preempt_enable();
78991+ return matchpo;
78992+ }
78993+ }
78994+
78995+ // lookup parent
78996+
78997+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
78998+
78999+ preempt_enable();
79000+ return matchpo;
79001+}
79002+
79003+__u32
79004+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
79005+ const struct vfsmount * mnt, const __u32 mode)
79006+{
79007+ struct acl_object_label *matchpo;
79008+ __u32 retval;
79009+
79010+ if (unlikely(!(gr_status & GR_READY)))
79011+ return (mode & ~GR_AUDITS);
79012+
79013+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
79014+
79015+ retval = matchpo->mode & mode;
79016+
79017+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
79018+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
79019+ __u32 new_mode = mode;
79020+
79021+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
79022+
79023+ gr_log_learn(new_dentry, mnt, new_mode);
79024+ return new_mode;
79025+ }
79026+
79027+ return retval;
79028+}
79029+
79030+__u32
79031+gr_check_link(const struct dentry * new_dentry,
79032+ const struct dentry * parent_dentry,
79033+ const struct vfsmount * parent_mnt,
79034+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
79035+{
79036+ struct acl_object_label *obj;
79037+ __u32 oldmode, newmode;
79038+ __u32 needmode;
79039+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
79040+ GR_DELETE | GR_INHERIT;
79041+
79042+ if (unlikely(!(gr_status & GR_READY)))
79043+ return (GR_CREATE | GR_LINK);
79044+
79045+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
79046+ oldmode = obj->mode;
79047+
79048+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
79049+ newmode = obj->mode;
79050+
79051+ needmode = newmode & checkmodes;
79052+
79053+ // old name for hardlink must have at least the permissions of the new name
79054+ if ((oldmode & needmode) != needmode)
79055+ goto bad;
79056+
79057+ // if old name had restrictions/auditing, make sure the new name does as well
79058+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
79059+
79060+ // don't allow hardlinking of suid/sgid/fcapped files without permission
79061+ if (is_privileged_binary(old_dentry))
79062+ needmode |= GR_SETID;
79063+
79064+ if ((newmode & needmode) != needmode)
79065+ goto bad;
79066+
79067+ // enforce minimum permissions
79068+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
79069+ return newmode;
79070+bad:
79071+ needmode = oldmode;
79072+ if (is_privileged_binary(old_dentry))
79073+ needmode |= GR_SETID;
79074+
79075+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
79076+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
79077+ return (GR_CREATE | GR_LINK);
79078+ } else if (newmode & GR_SUPPRESS)
79079+ return GR_SUPPRESS;
79080+ else
79081+ return 0;
79082+}
79083+
79084+int
79085+gr_check_hidden_task(const struct task_struct *task)
79086+{
79087+ if (unlikely(!(gr_status & GR_READY)))
79088+ return 0;
79089+
79090+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
79091+ return 1;
79092+
79093+ return 0;
79094+}
79095+
79096+int
79097+gr_check_protected_task(const struct task_struct *task)
79098+{
79099+ if (unlikely(!(gr_status & GR_READY) || !task))
79100+ return 0;
79101+
79102+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
79103+ task->acl != current->acl)
79104+ return 1;
79105+
79106+ return 0;
79107+}
79108+
79109+int
79110+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
79111+{
79112+ struct task_struct *p;
79113+ int ret = 0;
79114+
79115+ if (unlikely(!(gr_status & GR_READY) || !pid))
79116+ return ret;
79117+
79118+ read_lock(&tasklist_lock);
79119+ do_each_pid_task(pid, type, p) {
79120+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
79121+ p->acl != current->acl) {
79122+ ret = 1;
79123+ goto out;
79124+ }
79125+ } while_each_pid_task(pid, type, p);
79126+out:
79127+ read_unlock(&tasklist_lock);
79128+
79129+ return ret;
79130+}
79131+
79132+void
79133+gr_copy_label(struct task_struct *tsk)
79134+{
79135+ struct task_struct *p = current;
79136+
79137+ tsk->inherited = p->inherited;
79138+ tsk->acl_sp_role = 0;
79139+ tsk->acl_role_id = p->acl_role_id;
79140+ tsk->acl = p->acl;
79141+ tsk->role = p->role;
79142+ tsk->signal->used_accept = 0;
79143+ tsk->signal->curr_ip = p->signal->curr_ip;
79144+ tsk->signal->saved_ip = p->signal->saved_ip;
79145+ if (p->exec_file)
79146+ get_file(p->exec_file);
79147+ tsk->exec_file = p->exec_file;
79148+ tsk->is_writable = p->is_writable;
79149+ if (unlikely(p->signal->used_accept)) {
79150+ p->signal->curr_ip = 0;
79151+ p->signal->saved_ip = 0;
79152+ }
79153+
79154+ return;
79155+}
79156+
79157+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
79158+
79159+int
79160+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
79161+{
79162+ unsigned int i;
79163+ __u16 num;
79164+ uid_t *uidlist;
79165+ uid_t curuid;
79166+ int realok = 0;
79167+ int effectiveok = 0;
79168+ int fsok = 0;
79169+ uid_t globalreal, globaleffective, globalfs;
79170+
79171+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
79172+ struct user_struct *user;
79173+
79174+ if (!uid_valid(real))
79175+ goto skipit;
79176+
79177+ /* find user based on global namespace */
79178+
79179+ globalreal = GR_GLOBAL_UID(real);
79180+
79181+ user = find_user(make_kuid(&init_user_ns, globalreal));
79182+ if (user == NULL)
79183+ goto skipit;
79184+
79185+ if (gr_process_kernel_setuid_ban(user)) {
79186+ /* for find_user */
79187+ free_uid(user);
79188+ return 1;
79189+ }
79190+
79191+ /* for find_user */
79192+ free_uid(user);
79193+
79194+skipit:
79195+#endif
79196+
79197+ if (unlikely(!(gr_status & GR_READY)))
79198+ return 0;
79199+
79200+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
79201+ gr_log_learn_uid_change(real, effective, fs);
79202+
79203+ num = current->acl->user_trans_num;
79204+ uidlist = current->acl->user_transitions;
79205+
79206+ if (uidlist == NULL)
79207+ return 0;
79208+
79209+ if (!uid_valid(real)) {
79210+ realok = 1;
79211+ globalreal = (uid_t)-1;
79212+ } else {
79213+ globalreal = GR_GLOBAL_UID(real);
79214+ }
79215+ if (!uid_valid(effective)) {
79216+ effectiveok = 1;
79217+ globaleffective = (uid_t)-1;
79218+ } else {
79219+ globaleffective = GR_GLOBAL_UID(effective);
79220+ }
79221+ if (!uid_valid(fs)) {
79222+ fsok = 1;
79223+ globalfs = (uid_t)-1;
79224+ } else {
79225+ globalfs = GR_GLOBAL_UID(fs);
79226+ }
79227+
79228+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
79229+ for (i = 0; i < num; i++) {
79230+ curuid = uidlist[i];
79231+ if (globalreal == curuid)
79232+ realok = 1;
79233+ if (globaleffective == curuid)
79234+ effectiveok = 1;
79235+ if (globalfs == curuid)
79236+ fsok = 1;
79237+ }
79238+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
79239+ for (i = 0; i < num; i++) {
79240+ curuid = uidlist[i];
79241+ if (globalreal == curuid)
79242+ break;
79243+ if (globaleffective == curuid)
79244+ break;
79245+ if (globalfs == curuid)
79246+ break;
79247+ }
79248+ /* not in deny list */
79249+ if (i == num) {
79250+ realok = 1;
79251+ effectiveok = 1;
79252+ fsok = 1;
79253+ }
79254+ }
79255+
79256+ if (realok && effectiveok && fsok)
79257+ return 0;
79258+ else {
79259+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
79260+ return 1;
79261+ }
79262+}
79263+
79264+int
79265+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
79266+{
79267+ unsigned int i;
79268+ __u16 num;
79269+ gid_t *gidlist;
79270+ gid_t curgid;
79271+ int realok = 0;
79272+ int effectiveok = 0;
79273+ int fsok = 0;
79274+ gid_t globalreal, globaleffective, globalfs;
79275+
79276+ if (unlikely(!(gr_status & GR_READY)))
79277+ return 0;
79278+
79279+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
79280+ gr_log_learn_gid_change(real, effective, fs);
79281+
79282+ num = current->acl->group_trans_num;
79283+ gidlist = current->acl->group_transitions;
79284+
79285+ if (gidlist == NULL)
79286+ return 0;
79287+
79288+ if (!gid_valid(real)) {
79289+ realok = 1;
79290+ globalreal = (gid_t)-1;
79291+ } else {
79292+ globalreal = GR_GLOBAL_GID(real);
79293+ }
79294+ if (!gid_valid(effective)) {
79295+ effectiveok = 1;
79296+ globaleffective = (gid_t)-1;
79297+ } else {
79298+ globaleffective = GR_GLOBAL_GID(effective);
79299+ }
79300+ if (!gid_valid(fs)) {
79301+ fsok = 1;
79302+ globalfs = (gid_t)-1;
79303+ } else {
79304+ globalfs = GR_GLOBAL_GID(fs);
79305+ }
79306+
79307+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
79308+ for (i = 0; i < num; i++) {
79309+ curgid = gidlist[i];
79310+ if (globalreal == curgid)
79311+ realok = 1;
79312+ if (globaleffective == curgid)
79313+ effectiveok = 1;
79314+ if (globalfs == curgid)
79315+ fsok = 1;
79316+ }
79317+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
79318+ for (i = 0; i < num; i++) {
79319+ curgid = gidlist[i];
79320+ if (globalreal == curgid)
79321+ break;
79322+ if (globaleffective == curgid)
79323+ break;
79324+ if (globalfs == curgid)
79325+ break;
79326+ }
79327+ /* not in deny list */
79328+ if (i == num) {
79329+ realok = 1;
79330+ effectiveok = 1;
79331+ fsok = 1;
79332+ }
79333+ }
79334+
79335+ if (realok && effectiveok && fsok)
79336+ return 0;
79337+ else {
79338+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
79339+ return 1;
79340+ }
79341+}
79342+
79343+extern int gr_acl_is_capable(const int cap);
79344+
79345+void
79346+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
79347+{
79348+ struct acl_role_label *role = task->role;
79349+ struct acl_role_label *origrole = role;
79350+ struct acl_subject_label *subj = NULL;
79351+ struct acl_object_label *obj;
79352+ struct file *filp;
79353+ uid_t uid;
79354+ gid_t gid;
79355+
79356+ if (unlikely(!(gr_status & GR_READY)))
79357+ return;
79358+
79359+ uid = GR_GLOBAL_UID(kuid);
79360+ gid = GR_GLOBAL_GID(kgid);
79361+
79362+ filp = task->exec_file;
79363+
79364+ /* kernel process, we'll give them the kernel role */
79365+ if (unlikely(!filp)) {
79366+ task->role = running_polstate.kernel_role;
79367+ task->acl = running_polstate.kernel_role->root_label;
79368+ return;
79369+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
79370+ /* save the current ip at time of role lookup so that the proper
79371+ IP will be learned for role_allowed_ip */
79372+ task->signal->saved_ip = task->signal->curr_ip;
79373+ role = lookup_acl_role_label(task, uid, gid);
79374+ }
79375+
79376+ /* don't change the role if we're not a privileged process */
79377+ if (role && task->role != role &&
79378+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
79379+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
79380+ return;
79381+
79382+ task->role = role;
79383+
79384+ if (task->inherited) {
79385+ /* if we reached our subject through inheritance, then first see
79386+ if there's a subject of the same name in the new role that has
79387+ an object that would result in the same inherited subject
79388+ */
79389+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
79390+ if (subj) {
79391+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
79392+ if (!(obj->mode & GR_INHERIT))
79393+ subj = NULL;
79394+ }
79395+
79396+ }
79397+ if (subj == NULL) {
79398+ /* otherwise:
79399+ perform subject lookup in possibly new role
79400+ we can use this result below in the case where role == task->role
79401+ */
79402+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
79403+ }
79404+
79405+ /* if we changed uid/gid, but result in the same role
79406+ and are using inheritance, don't lose the inherited subject
79407+ if current subject is other than what normal lookup
79408+ would result in, we arrived via inheritance, don't
79409+ lose subject
79410+ */
79411+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
79412+ (subj == task->acl)))
79413+ task->acl = subj;
79414+
79415+ /* leave task->inherited unaffected */
79416+
79417+ task->is_writable = 0;
79418+
79419+ /* ignore additional mmap checks for processes that are writable
79420+ by the default ACL */
79421+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
79422+ if (unlikely(obj->mode & GR_WRITE))
79423+ task->is_writable = 1;
79424+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
79425+ if (unlikely(obj->mode & GR_WRITE))
79426+ task->is_writable = 1;
79427+
79428+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
79429+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
79430+#endif
79431+
79432+ gr_set_proc_res(task);
79433+
79434+ return;
79435+}
79436+
79437+int
79438+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
79439+ const int unsafe_flags)
79440+{
79441+ struct task_struct *task = current;
79442+ struct acl_subject_label *newacl;
79443+ struct acl_object_label *obj;
79444+ __u32 retmode;
79445+
79446+ if (unlikely(!(gr_status & GR_READY)))
79447+ return 0;
79448+
79449+ newacl = chk_subj_label(dentry, mnt, task->role);
79450+
79451+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
79452+ did an exec
79453+ */
79454+ rcu_read_lock();
79455+ read_lock(&tasklist_lock);
79456+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
79457+ (task->parent->acl->mode & GR_POVERRIDE))) {
79458+ read_unlock(&tasklist_lock);
79459+ rcu_read_unlock();
79460+ goto skip_check;
79461+ }
79462+ read_unlock(&tasklist_lock);
79463+ rcu_read_unlock();
79464+
79465+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
79466+ !(task->role->roletype & GR_ROLE_GOD) &&
79467+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
79468+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
79469+ if (unsafe_flags & LSM_UNSAFE_SHARE)
79470+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
79471+ else
79472+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
79473+ return -EACCES;
79474+ }
79475+
79476+skip_check:
79477+
79478+ obj = chk_obj_label(dentry, mnt, task->acl);
79479+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
79480+
79481+ if (!(task->acl->mode & GR_INHERITLEARN) &&
79482+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
79483+ if (obj->nested)
79484+ task->acl = obj->nested;
79485+ else
79486+ task->acl = newacl;
79487+ task->inherited = 0;
79488+ } else {
79489+ task->inherited = 1;
79490+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
79491+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
79492+ }
79493+
79494+ task->is_writable = 0;
79495+
79496+ /* ignore additional mmap checks for processes that are writable
79497+ by the default ACL */
79498+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
79499+ if (unlikely(obj->mode & GR_WRITE))
79500+ task->is_writable = 1;
79501+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
79502+ if (unlikely(obj->mode & GR_WRITE))
79503+ task->is_writable = 1;
79504+
79505+ gr_set_proc_res(task);
79506+
79507+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
79508+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
79509+#endif
79510+ return 0;
79511+}
79512+
79513+/* always called with valid inodev ptr */
79514+static void
79515+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
79516+{
79517+ struct acl_object_label *matchpo;
79518+ struct acl_subject_label *matchps;
79519+ struct acl_subject_label *subj;
79520+ struct acl_role_label *role;
79521+ unsigned int x;
79522+
79523+ FOR_EACH_ROLE_START(role)
79524+ FOR_EACH_SUBJECT_START(role, subj, x)
79525+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
79526+ matchpo->mode |= GR_DELETED;
79527+ FOR_EACH_SUBJECT_END(subj,x)
79528+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
79529+ /* nested subjects aren't in the role's subj_hash table */
79530+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
79531+ matchpo->mode |= GR_DELETED;
79532+ FOR_EACH_NESTED_SUBJECT_END(subj)
79533+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
79534+ matchps->mode |= GR_DELETED;
79535+ FOR_EACH_ROLE_END(role)
79536+
79537+ inodev->nentry->deleted = 1;
79538+
79539+ return;
79540+}
79541+
79542+void
79543+gr_handle_delete(const u64 ino, const dev_t dev)
79544+{
79545+ struct inodev_entry *inodev;
79546+
79547+ if (unlikely(!(gr_status & GR_READY)))
79548+ return;
79549+
79550+ write_lock(&gr_inode_lock);
79551+ inodev = lookup_inodev_entry(ino, dev);
79552+ if (inodev != NULL)
79553+ do_handle_delete(inodev, ino, dev);
79554+ write_unlock(&gr_inode_lock);
79555+
79556+ return;
79557+}
79558+
79559+static void
79560+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
79561+ const u64 newinode, const dev_t newdevice,
79562+ struct acl_subject_label *subj)
79563+{
79564+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
79565+ struct acl_object_label *match;
79566+
79567+ match = subj->obj_hash[index];
79568+
79569+ while (match && (match->inode != oldinode ||
79570+ match->device != olddevice ||
79571+ !(match->mode & GR_DELETED)))
79572+ match = match->next;
79573+
79574+ if (match && (match->inode == oldinode)
79575+ && (match->device == olddevice)
79576+ && (match->mode & GR_DELETED)) {
79577+ if (match->prev == NULL) {
79578+ subj->obj_hash[index] = match->next;
79579+ if (match->next != NULL)
79580+ match->next->prev = NULL;
79581+ } else {
79582+ match->prev->next = match->next;
79583+ if (match->next != NULL)
79584+ match->next->prev = match->prev;
79585+ }
79586+ match->prev = NULL;
79587+ match->next = NULL;
79588+ match->inode = newinode;
79589+ match->device = newdevice;
79590+ match->mode &= ~GR_DELETED;
79591+
79592+ insert_acl_obj_label(match, subj);
79593+ }
79594+
79595+ return;
79596+}
79597+
79598+static void
79599+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
79600+ const u64 newinode, const dev_t newdevice,
79601+ struct acl_role_label *role)
79602+{
79603+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
79604+ struct acl_subject_label *match;
79605+
79606+ match = role->subj_hash[index];
79607+
79608+ while (match && (match->inode != oldinode ||
79609+ match->device != olddevice ||
79610+ !(match->mode & GR_DELETED)))
79611+ match = match->next;
79612+
79613+ if (match && (match->inode == oldinode)
79614+ && (match->device == olddevice)
79615+ && (match->mode & GR_DELETED)) {
79616+ if (match->prev == NULL) {
79617+ role->subj_hash[index] = match->next;
79618+ if (match->next != NULL)
79619+ match->next->prev = NULL;
79620+ } else {
79621+ match->prev->next = match->next;
79622+ if (match->next != NULL)
79623+ match->next->prev = match->prev;
79624+ }
79625+ match->prev = NULL;
79626+ match->next = NULL;
79627+ match->inode = newinode;
79628+ match->device = newdevice;
79629+ match->mode &= ~GR_DELETED;
79630+
79631+ insert_acl_subj_label(match, role);
79632+ }
79633+
79634+ return;
79635+}
79636+
79637+static void
79638+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
79639+ const u64 newinode, const dev_t newdevice)
79640+{
79641+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
79642+ struct inodev_entry *match;
79643+
79644+ match = running_polstate.inodev_set.i_hash[index];
79645+
79646+ while (match && (match->nentry->inode != oldinode ||
79647+ match->nentry->device != olddevice || !match->nentry->deleted))
79648+ match = match->next;
79649+
79650+ if (match && (match->nentry->inode == oldinode)
79651+ && (match->nentry->device == olddevice) &&
79652+ match->nentry->deleted) {
79653+ if (match->prev == NULL) {
79654+ running_polstate.inodev_set.i_hash[index] = match->next;
79655+ if (match->next != NULL)
79656+ match->next->prev = NULL;
79657+ } else {
79658+ match->prev->next = match->next;
79659+ if (match->next != NULL)
79660+ match->next->prev = match->prev;
79661+ }
79662+ match->prev = NULL;
79663+ match->next = NULL;
79664+ match->nentry->inode = newinode;
79665+ match->nentry->device = newdevice;
79666+ match->nentry->deleted = 0;
79667+
79668+ insert_inodev_entry(match);
79669+ }
79670+
79671+ return;
79672+}
79673+
79674+static void
79675+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
79676+{
79677+ struct acl_subject_label *subj;
79678+ struct acl_role_label *role;
79679+ unsigned int x;
79680+
79681+ FOR_EACH_ROLE_START(role)
79682+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
79683+
79684+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
79685+ if ((subj->inode == ino) && (subj->device == dev)) {
79686+ subj->inode = ino;
79687+ subj->device = dev;
79688+ }
79689+ /* nested subjects aren't in the role's subj_hash table */
79690+ update_acl_obj_label(matchn->inode, matchn->device,
79691+ ino, dev, subj);
79692+ FOR_EACH_NESTED_SUBJECT_END(subj)
79693+ FOR_EACH_SUBJECT_START(role, subj, x)
79694+ update_acl_obj_label(matchn->inode, matchn->device,
79695+ ino, dev, subj);
79696+ FOR_EACH_SUBJECT_END(subj,x)
79697+ FOR_EACH_ROLE_END(role)
79698+
79699+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
79700+
79701+ return;
79702+}
79703+
79704+static void
79705+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
79706+ const struct vfsmount *mnt)
79707+{
79708+ u64 ino = __get_ino(dentry);
79709+ dev_t dev = __get_dev(dentry);
79710+
79711+ __do_handle_create(matchn, ino, dev);
79712+
79713+ return;
79714+}
79715+
79716+void
79717+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
79718+{
79719+ struct name_entry *matchn;
79720+
79721+ if (unlikely(!(gr_status & GR_READY)))
79722+ return;
79723+
79724+ preempt_disable();
79725+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
79726+
79727+ if (unlikely((unsigned long)matchn)) {
79728+ write_lock(&gr_inode_lock);
79729+ do_handle_create(matchn, dentry, mnt);
79730+ write_unlock(&gr_inode_lock);
79731+ }
79732+ preempt_enable();
79733+
79734+ return;
79735+}
79736+
79737+void
79738+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
79739+{
79740+ struct name_entry *matchn;
79741+
79742+ if (unlikely(!(gr_status & GR_READY)))
79743+ return;
79744+
79745+ preempt_disable();
79746+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
79747+
79748+ if (unlikely((unsigned long)matchn)) {
79749+ write_lock(&gr_inode_lock);
79750+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
79751+ write_unlock(&gr_inode_lock);
79752+ }
79753+ preempt_enable();
79754+
79755+ return;
79756+}
79757+
79758+void
79759+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
79760+ struct dentry *old_dentry,
79761+ struct dentry *new_dentry,
79762+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
79763+{
79764+ struct name_entry *matchn;
79765+ struct name_entry *matchn2 = NULL;
79766+ struct inodev_entry *inodev;
79767+ struct inode *inode = new_dentry->d_inode;
79768+ u64 old_ino = __get_ino(old_dentry);
79769+ dev_t old_dev = __get_dev(old_dentry);
79770+ unsigned int exchange = flags & RENAME_EXCHANGE;
79771+
79772+ /* vfs_rename swaps the name and parent link for old_dentry and
79773+ new_dentry
79774+ at this point, old_dentry has the new name, parent link, and inode
79775+ for the renamed file
79776+ if a file is being replaced by a rename, new_dentry has the inode
79777+ and name for the replaced file
79778+ */
79779+
79780+ if (unlikely(!(gr_status & GR_READY)))
79781+ return;
79782+
79783+ preempt_disable();
79784+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
79785+
79786+ /* exchange cases:
79787+ a filename exists for the source, but not dest
79788+ do a recreate on source
79789+ a filename exists for the dest, but not source
79790+ do a recreate on dest
79791+ a filename exists for both source and dest
79792+ delete source and dest, then create source and dest
79793+ a filename exists for neither source nor dest
79794+ no updates needed
79795+
79796+ the name entry lookups get us the old inode/dev associated with
79797+ each name, so do the deletes first (if possible) so that when
79798+ we do the create, we pick up on the right entries
79799+ */
79800+
79801+ if (exchange)
79802+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
79803+
79804+ /* we wouldn't have to check d_inode if it weren't for
79805+ NFS silly-renaming
79806+ */
79807+
79808+ write_lock(&gr_inode_lock);
79809+ if (unlikely((replace || exchange) && inode)) {
79810+ u64 new_ino = __get_ino(new_dentry);
79811+ dev_t new_dev = __get_dev(new_dentry);
79812+
79813+ inodev = lookup_inodev_entry(new_ino, new_dev);
79814+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
79815+ do_handle_delete(inodev, new_ino, new_dev);
79816+ }
79817+
79818+ inodev = lookup_inodev_entry(old_ino, old_dev);
79819+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
79820+ do_handle_delete(inodev, old_ino, old_dev);
79821+
79822+ if (unlikely(matchn != NULL))
79823+ do_handle_create(matchn, old_dentry, mnt);
79824+
79825+ if (unlikely(matchn2 != NULL))
79826+ do_handle_create(matchn2, new_dentry, mnt);
79827+
79828+ write_unlock(&gr_inode_lock);
79829+ preempt_enable();
79830+
79831+ return;
79832+}
79833+
79834+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
79835+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
79836+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
79837+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
79838+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
79839+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
79840+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
79841+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
79842+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
79843+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
79844+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
79845+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
79846+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
79847+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
79848+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
79849+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
79850+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
79851+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
79852+};
79853+
79854+void
79855+gr_learn_resource(const struct task_struct *task,
79856+ const int res, const unsigned long wanted, const int gt)
79857+{
79858+ struct acl_subject_label *acl;
79859+ const struct cred *cred;
79860+
79861+ if (unlikely((gr_status & GR_READY) &&
79862+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
79863+ goto skip_reslog;
79864+
79865+ gr_log_resource(task, res, wanted, gt);
79866+skip_reslog:
79867+
79868+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
79869+ return;
79870+
79871+ acl = task->acl;
79872+
79873+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
79874+ !(acl->resmask & (1U << (unsigned short) res))))
79875+ return;
79876+
79877+ if (wanted >= acl->res[res].rlim_cur) {
79878+ unsigned long res_add;
79879+
79880+ res_add = wanted + res_learn_bumps[res];
79881+
79882+ acl->res[res].rlim_cur = res_add;
79883+
79884+ if (wanted > acl->res[res].rlim_max)
79885+ acl->res[res].rlim_max = res_add;
79886+
79887+ /* only log the subject filename, since resource logging is supported for
79888+ single-subject learning only */
79889+ rcu_read_lock();
79890+ cred = __task_cred(task);
79891+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
79892+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
79893+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
79894+ "", (unsigned long) res, &task->signal->saved_ip);
79895+ rcu_read_unlock();
79896+ }
79897+
79898+ return;
79899+}
79900+EXPORT_SYMBOL_GPL(gr_learn_resource);
79901+#endif
79902+
79903+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
79904+void
79905+pax_set_initial_flags(struct linux_binprm *bprm)
79906+{
79907+ struct task_struct *task = current;
79908+ struct acl_subject_label *proc;
79909+ unsigned long flags;
79910+
79911+ if (unlikely(!(gr_status & GR_READY)))
79912+ return;
79913+
79914+ flags = pax_get_flags(task);
79915+
79916+ proc = task->acl;
79917+
79918+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
79919+ flags &= ~MF_PAX_PAGEEXEC;
79920+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
79921+ flags &= ~MF_PAX_SEGMEXEC;
79922+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
79923+ flags &= ~MF_PAX_RANDMMAP;
79924+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
79925+ flags &= ~MF_PAX_EMUTRAMP;
79926+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
79927+ flags &= ~MF_PAX_MPROTECT;
79928+
79929+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
79930+ flags |= MF_PAX_PAGEEXEC;
79931+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
79932+ flags |= MF_PAX_SEGMEXEC;
79933+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
79934+ flags |= MF_PAX_RANDMMAP;
79935+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
79936+ flags |= MF_PAX_EMUTRAMP;
79937+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
79938+ flags |= MF_PAX_MPROTECT;
79939+
79940+ pax_set_flags(task, flags);
79941+
79942+ return;
79943+}
79944+#endif
79945+
79946+int
79947+gr_handle_proc_ptrace(struct task_struct *task)
79948+{
79949+ struct file *filp;
79950+ struct task_struct *tmp = task;
79951+ struct task_struct *curtemp = current;
79952+ __u32 retmode;
79953+
79954+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
79955+ if (unlikely(!(gr_status & GR_READY)))
79956+ return 0;
79957+#endif
79958+
79959+ read_lock(&tasklist_lock);
79960+ read_lock(&grsec_exec_file_lock);
79961+ filp = task->exec_file;
79962+
79963+ while (task_pid_nr(tmp) > 0) {
79964+ if (tmp == curtemp)
79965+ break;
79966+ tmp = tmp->real_parent;
79967+ }
79968+
79969+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
79970+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
79971+ read_unlock(&grsec_exec_file_lock);
79972+ read_unlock(&tasklist_lock);
79973+ return 1;
79974+ }
79975+
79976+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79977+ if (!(gr_status & GR_READY)) {
79978+ read_unlock(&grsec_exec_file_lock);
79979+ read_unlock(&tasklist_lock);
79980+ return 0;
79981+ }
79982+#endif
79983+
79984+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
79985+ read_unlock(&grsec_exec_file_lock);
79986+ read_unlock(&tasklist_lock);
79987+
79988+ if (retmode & GR_NOPTRACE)
79989+ return 1;
79990+
79991+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
79992+ && (current->acl != task->acl || (current->acl != current->role->root_label
79993+ && task_pid_nr(current) != task_pid_nr(task))))
79994+ return 1;
79995+
79996+ return 0;
79997+}
79998+
79999+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
80000+{
80001+ if (unlikely(!(gr_status & GR_READY)))
80002+ return;
80003+
80004+ if (!(current->role->roletype & GR_ROLE_GOD))
80005+ return;
80006+
80007+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
80008+ p->role->rolename, gr_task_roletype_to_char(p),
80009+ p->acl->filename);
80010+}
80011+
80012+int
80013+gr_handle_ptrace(struct task_struct *task, const long request)
80014+{
80015+ struct task_struct *tmp = task;
80016+ struct task_struct *curtemp = current;
80017+ __u32 retmode;
80018+
80019+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
80020+ if (unlikely(!(gr_status & GR_READY)))
80021+ return 0;
80022+#endif
80023+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
80024+ read_lock(&tasklist_lock);
80025+ while (task_pid_nr(tmp) > 0) {
80026+ if (tmp == curtemp)
80027+ break;
80028+ tmp = tmp->real_parent;
80029+ }
80030+
80031+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
80032+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
80033+ read_unlock(&tasklist_lock);
80034+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
80035+ return 1;
80036+ }
80037+ read_unlock(&tasklist_lock);
80038+ }
80039+
80040+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
80041+ if (!(gr_status & GR_READY))
80042+ return 0;
80043+#endif
80044+
80045+ read_lock(&grsec_exec_file_lock);
80046+ if (unlikely(!task->exec_file)) {
80047+ read_unlock(&grsec_exec_file_lock);
80048+ return 0;
80049+ }
80050+
80051+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
80052+ read_unlock(&grsec_exec_file_lock);
80053+
80054+ if (retmode & GR_NOPTRACE) {
80055+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
80056+ return 1;
80057+ }
80058+
80059+ if (retmode & GR_PTRACERD) {
80060+ switch (request) {
80061+ case PTRACE_SEIZE:
80062+ case PTRACE_POKETEXT:
80063+ case PTRACE_POKEDATA:
80064+ case PTRACE_POKEUSR:
80065+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
80066+ case PTRACE_SETREGS:
80067+ case PTRACE_SETFPREGS:
80068+#endif
80069+#ifdef CONFIG_X86
80070+ case PTRACE_SETFPXREGS:
80071+#endif
80072+#ifdef CONFIG_ALTIVEC
80073+ case PTRACE_SETVRREGS:
80074+#endif
80075+ return 1;
80076+ default:
80077+ return 0;
80078+ }
80079+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
80080+ !(current->role->roletype & GR_ROLE_GOD) &&
80081+ (current->acl != task->acl)) {
80082+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
80083+ return 1;
80084+ }
80085+
80086+ return 0;
80087+}
80088+
80089+static int is_writable_mmap(const struct file *filp)
80090+{
80091+ struct task_struct *task = current;
80092+ struct acl_object_label *obj, *obj2;
80093+
80094+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
80095+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
80096+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
80097+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
80098+ task->role->root_label);
80099+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
80100+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
80101+ return 1;
80102+ }
80103+ }
80104+ return 0;
80105+}
80106+
80107+int
80108+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
80109+{
80110+ __u32 mode;
80111+
80112+ if (unlikely(!file || !(prot & PROT_EXEC)))
80113+ return 1;
80114+
80115+ if (is_writable_mmap(file))
80116+ return 0;
80117+
80118+ mode =
80119+ gr_search_file(file->f_path.dentry,
80120+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
80121+ file->f_path.mnt);
80122+
80123+ if (!gr_tpe_allow(file))
80124+ return 0;
80125+
80126+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
80127+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
80128+ return 0;
80129+ } else if (unlikely(!(mode & GR_EXEC))) {
80130+ return 0;
80131+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
80132+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
80133+ return 1;
80134+ }
80135+
80136+ return 1;
80137+}
80138+
80139+int
80140+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
80141+{
80142+ __u32 mode;
80143+
80144+ if (unlikely(!file || !(prot & PROT_EXEC)))
80145+ return 1;
80146+
80147+ if (is_writable_mmap(file))
80148+ return 0;
80149+
80150+ mode =
80151+ gr_search_file(file->f_path.dentry,
80152+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
80153+ file->f_path.mnt);
80154+
80155+ if (!gr_tpe_allow(file))
80156+ return 0;
80157+
80158+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
80159+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
80160+ return 0;
80161+ } else if (unlikely(!(mode & GR_EXEC))) {
80162+ return 0;
80163+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
80164+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
80165+ return 1;
80166+ }
80167+
80168+ return 1;
80169+}
80170+
80171+void
80172+gr_acl_handle_psacct(struct task_struct *task, const long code)
80173+{
80174+ unsigned long runtime, cputime;
80175+ cputime_t utime, stime;
80176+ unsigned int wday, cday;
80177+ __u8 whr, chr;
80178+ __u8 wmin, cmin;
80179+ __u8 wsec, csec;
80180+ struct timespec curtime, starttime;
80181+
80182+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
80183+ !(task->acl->mode & GR_PROCACCT)))
80184+ return;
80185+
80186+ curtime = ns_to_timespec(ktime_get_ns());
80187+ starttime = ns_to_timespec(task->start_time);
80188+ runtime = curtime.tv_sec - starttime.tv_sec;
80189+ wday = runtime / (60 * 60 * 24);
80190+ runtime -= wday * (60 * 60 * 24);
80191+ whr = runtime / (60 * 60);
80192+ runtime -= whr * (60 * 60);
80193+ wmin = runtime / 60;
80194+ runtime -= wmin * 60;
80195+ wsec = runtime;
80196+
80197+ task_cputime(task, &utime, &stime);
80198+ cputime = cputime_to_secs(utime + stime);
80199+ cday = cputime / (60 * 60 * 24);
80200+ cputime -= cday * (60 * 60 * 24);
80201+ chr = cputime / (60 * 60);
80202+ cputime -= chr * (60 * 60);
80203+ cmin = cputime / 60;
80204+ cputime -= cmin * 60;
80205+ csec = cputime;
80206+
80207+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
80208+
80209+ return;
80210+}
80211+
80212+#ifdef CONFIG_TASKSTATS
80213+int gr_is_taskstats_denied(int pid)
80214+{
80215+ struct task_struct *task;
80216+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80217+ const struct cred *cred;
80218+#endif
80219+ int ret = 0;
80220+
80221+ /* restrict taskstats viewing to un-chrooted root users
80222+ who have the 'view' subject flag if the RBAC system is enabled
80223+ */
80224+
80225+ rcu_read_lock();
80226+ read_lock(&tasklist_lock);
80227+ task = find_task_by_vpid(pid);
80228+ if (task) {
80229+#ifdef CONFIG_GRKERNSEC_CHROOT
80230+ if (proc_is_chrooted(task))
80231+ ret = -EACCES;
80232+#endif
80233+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80234+ cred = __task_cred(task);
80235+#ifdef CONFIG_GRKERNSEC_PROC_USER
80236+ if (gr_is_global_nonroot(cred->uid))
80237+ ret = -EACCES;
80238+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80239+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
80240+ ret = -EACCES;
80241+#endif
80242+#endif
80243+ if (gr_status & GR_READY) {
80244+ if (!(task->acl->mode & GR_VIEW))
80245+ ret = -EACCES;
80246+ }
80247+ } else
80248+ ret = -ENOENT;
80249+
80250+ read_unlock(&tasklist_lock);
80251+ rcu_read_unlock();
80252+
80253+ return ret;
80254+}
80255+#endif
80256+
80257+/* AUXV entries are filled via a descendant of search_binary_handler
80258+ after we've already applied the subject for the target
80259+*/
80260+int gr_acl_enable_at_secure(void)
80261+{
80262+ if (unlikely(!(gr_status & GR_READY)))
80263+ return 0;
80264+
80265+ if (current->acl->mode & GR_ATSECURE)
80266+ return 1;
80267+
80268+ return 0;
80269+}
80270+
80271+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
80272+{
80273+ struct task_struct *task = current;
80274+ struct dentry *dentry = file->f_path.dentry;
80275+ struct vfsmount *mnt = file->f_path.mnt;
80276+ struct acl_object_label *obj, *tmp;
80277+ struct acl_subject_label *subj;
80278+ unsigned int bufsize;
80279+ int is_not_root;
80280+ char *path;
80281+ dev_t dev = __get_dev(dentry);
80282+
80283+ if (unlikely(!(gr_status & GR_READY)))
80284+ return 1;
80285+
80286+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
80287+ return 1;
80288+
80289+ /* ignore Eric Biederman */
80290+ if (IS_PRIVATE(dentry->d_inode))
80291+ return 1;
80292+
80293+ subj = task->acl;
80294+ read_lock(&gr_inode_lock);
80295+ do {
80296+ obj = lookup_acl_obj_label(ino, dev, subj);
80297+ if (obj != NULL) {
80298+ read_unlock(&gr_inode_lock);
80299+ return (obj->mode & GR_FIND) ? 1 : 0;
80300+ }
80301+ } while ((subj = subj->parent_subject));
80302+ read_unlock(&gr_inode_lock);
80303+
80304+ /* this is purely an optimization since we're looking for an object
80305+ for the directory we're doing a readdir on
80306+ if it's possible for any globbed object to match the entry we're
80307+ filling into the directory, then the object we find here will be
80308+ an anchor point with attached globbed objects
80309+ */
80310+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
80311+ if (obj->globbed == NULL)
80312+ return (obj->mode & GR_FIND) ? 1 : 0;
80313+
80314+ is_not_root = ((obj->filename[0] == '/') &&
80315+ (obj->filename[1] == '\0')) ? 0 : 1;
80316+ bufsize = PAGE_SIZE - namelen - is_not_root;
80317+
80318+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
80319+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
80320+ return 1;
80321+
80322+ preempt_disable();
80323+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
80324+ bufsize);
80325+
80326+ bufsize = strlen(path);
80327+
80328+ /* if base is "/", don't append an additional slash */
80329+ if (is_not_root)
80330+ *(path + bufsize) = '/';
80331+ memcpy(path + bufsize + is_not_root, name, namelen);
80332+ *(path + bufsize + namelen + is_not_root) = '\0';
80333+
80334+ tmp = obj->globbed;
80335+ while (tmp) {
80336+ if (!glob_match(tmp->filename, path)) {
80337+ preempt_enable();
80338+ return (tmp->mode & GR_FIND) ? 1 : 0;
80339+ }
80340+ tmp = tmp->next;
80341+ }
80342+ preempt_enable();
80343+ return (obj->mode & GR_FIND) ? 1 : 0;
80344+}
80345+
80346+void gr_put_exec_file(struct task_struct *task)
80347+{
80348+ struct file *filp;
80349+
80350+ write_lock(&grsec_exec_file_lock);
80351+ filp = task->exec_file;
80352+ task->exec_file = NULL;
80353+ write_unlock(&grsec_exec_file_lock);
80354+
80355+ if (filp)
80356+ fput(filp);
80357+
80358+ return;
80359+}
80360+
80361+
80362+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
80363+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
80364+#endif
80365+#ifdef CONFIG_SECURITY
80366+EXPORT_SYMBOL_GPL(gr_check_user_change);
80367+EXPORT_SYMBOL_GPL(gr_check_group_change);
80368+#endif
80369+
80370diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
80371new file mode 100644
80372index 0000000..9adc75c
80373--- /dev/null
80374+++ b/grsecurity/gracl_alloc.c
80375@@ -0,0 +1,105 @@
80376+#include <linux/kernel.h>
80377+#include <linux/mm.h>
80378+#include <linux/slab.h>
80379+#include <linux/vmalloc.h>
80380+#include <linux/gracl.h>
80381+#include <linux/grsecurity.h>
80382+
80383+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
80384+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
80385+
80386+static int
80387+alloc_pop(void)
80388+{
80389+ if (current_alloc_state->alloc_stack_next == 1)
80390+ return 0;
80391+
80392+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
80393+
80394+ current_alloc_state->alloc_stack_next--;
80395+
80396+ return 1;
80397+}
80398+
80399+static int
80400+alloc_push(void *buf)
80401+{
80402+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
80403+ return 1;
80404+
80405+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
80406+
80407+ current_alloc_state->alloc_stack_next++;
80408+
80409+ return 0;
80410+}
80411+
80412+void *
80413+acl_alloc(unsigned long len)
80414+{
80415+ void *ret = NULL;
80416+
80417+ if (!len || len > PAGE_SIZE)
80418+ goto out;
80419+
80420+ ret = kmalloc(len, GFP_KERNEL);
80421+
80422+ if (ret) {
80423+ if (alloc_push(ret)) {
80424+ kfree(ret);
80425+ ret = NULL;
80426+ }
80427+ }
80428+
80429+out:
80430+ return ret;
80431+}
80432+
80433+void *
80434+acl_alloc_num(unsigned long num, unsigned long len)
80435+{
80436+ if (!len || (num > (PAGE_SIZE / len)))
80437+ return NULL;
80438+
80439+ return acl_alloc(num * len);
80440+}
80441+
80442+void
80443+acl_free_all(void)
80444+{
80445+ if (!current_alloc_state->alloc_stack)
80446+ return;
80447+
80448+ while (alloc_pop()) ;
80449+
80450+ if (current_alloc_state->alloc_stack) {
80451+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
80452+ kfree(current_alloc_state->alloc_stack);
80453+ else
80454+ vfree(current_alloc_state->alloc_stack);
80455+ }
80456+
80457+ current_alloc_state->alloc_stack = NULL;
80458+ current_alloc_state->alloc_stack_size = 1;
80459+ current_alloc_state->alloc_stack_next = 1;
80460+
80461+ return;
80462+}
80463+
80464+int
80465+acl_alloc_stack_init(unsigned long size)
80466+{
80467+ if ((size * sizeof (void *)) <= PAGE_SIZE)
80468+ current_alloc_state->alloc_stack =
80469+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
80470+ else
80471+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
80472+
80473+ current_alloc_state->alloc_stack_size = size;
80474+ current_alloc_state->alloc_stack_next = 1;
80475+
80476+ if (!current_alloc_state->alloc_stack)
80477+ return 0;
80478+ else
80479+ return 1;
80480+}
80481diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
80482new file mode 100644
80483index 0000000..1a94c11
80484--- /dev/null
80485+++ b/grsecurity/gracl_cap.c
80486@@ -0,0 +1,127 @@
80487+#include <linux/kernel.h>
80488+#include <linux/module.h>
80489+#include <linux/sched.h>
80490+#include <linux/gracl.h>
80491+#include <linux/grsecurity.h>
80492+#include <linux/grinternal.h>
80493+
80494+extern const char *captab_log[];
80495+extern int captab_log_entries;
80496+
80497+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
80498+{
80499+ struct acl_subject_label *curracl;
80500+
80501+ if (!gr_acl_is_enabled())
80502+ return 1;
80503+
80504+ curracl = task->acl;
80505+
80506+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
80507+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
80508+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
80509+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
80510+ gr_to_filename(task->exec_file->f_path.dentry,
80511+ task->exec_file->f_path.mnt) : curracl->filename,
80512+ curracl->filename, 0UL,
80513+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
80514+ return 1;
80515+ }
80516+
80517+ return 0;
80518+}
80519+
80520+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
80521+{
80522+ struct acl_subject_label *curracl;
80523+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
80524+ kernel_cap_t cap_audit = __cap_empty_set;
80525+
80526+ if (!gr_acl_is_enabled())
80527+ return 1;
80528+
80529+ curracl = task->acl;
80530+
80531+ cap_drop = curracl->cap_lower;
80532+ cap_mask = curracl->cap_mask;
80533+ cap_audit = curracl->cap_invert_audit;
80534+
80535+ while ((curracl = curracl->parent_subject)) {
80536+ /* if the cap isn't specified in the current computed mask but is specified in the
80537+ current level subject, and is lowered in the current level subject, then add
80538+ it to the set of dropped capabilities
80539+ otherwise, add the current level subject's mask to the current computed mask
80540+ */
80541+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
80542+ cap_raise(cap_mask, cap);
80543+ if (cap_raised(curracl->cap_lower, cap))
80544+ cap_raise(cap_drop, cap);
80545+ if (cap_raised(curracl->cap_invert_audit, cap))
80546+ cap_raise(cap_audit, cap);
80547+ }
80548+ }
80549+
80550+ if (!cap_raised(cap_drop, cap)) {
80551+ if (cap_raised(cap_audit, cap))
80552+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
80553+ return 1;
80554+ }
80555+
80556+ /* only learn the capability use if the process has the capability in the
80557+ general case, the two uses in sys.c of gr_learn_cap are an exception
80558+ to this rule to ensure any role transition involves what the full-learned
80559+ policy believes in a privileged process
80560+ */
80561+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
80562+ return 1;
80563+
80564+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
80565+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
80566+
80567+ return 0;
80568+}
80569+
80570+int
80571+gr_acl_is_capable(const int cap)
80572+{
80573+ return gr_task_acl_is_capable(current, current_cred(), cap);
80574+}
80575+
80576+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
80577+{
80578+ struct acl_subject_label *curracl;
80579+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
80580+
80581+ if (!gr_acl_is_enabled())
80582+ return 1;
80583+
80584+ curracl = task->acl;
80585+
80586+ cap_drop = curracl->cap_lower;
80587+ cap_mask = curracl->cap_mask;
80588+
80589+ while ((curracl = curracl->parent_subject)) {
80590+ /* if the cap isn't specified in the current computed mask but is specified in the
80591+ current level subject, and is lowered in the current level subject, then add
80592+ it to the set of dropped capabilities
80593+ otherwise, add the current level subject's mask to the current computed mask
80594+ */
80595+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
80596+ cap_raise(cap_mask, cap);
80597+ if (cap_raised(curracl->cap_lower, cap))
80598+ cap_raise(cap_drop, cap);
80599+ }
80600+ }
80601+
80602+ if (!cap_raised(cap_drop, cap))
80603+ return 1;
80604+
80605+ return 0;
80606+}
80607+
80608+int
80609+gr_acl_is_capable_nolog(const int cap)
80610+{
80611+ return gr_task_acl_is_capable_nolog(current, cap);
80612+}
80613+
80614diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
80615new file mode 100644
80616index 0000000..a43dd06
80617--- /dev/null
80618+++ b/grsecurity/gracl_compat.c
80619@@ -0,0 +1,269 @@
80620+#include <linux/kernel.h>
80621+#include <linux/gracl.h>
80622+#include <linux/compat.h>
80623+#include <linux/gracl_compat.h>
80624+
80625+#include <asm/uaccess.h>
80626+
80627+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
80628+{
80629+ struct gr_arg_wrapper_compat uwrapcompat;
80630+
80631+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
80632+ return -EFAULT;
80633+
80634+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
80635+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
80636+ return -EINVAL;
80637+
80638+ uwrap->arg = compat_ptr(uwrapcompat.arg);
80639+ uwrap->version = uwrapcompat.version;
80640+ uwrap->size = sizeof(struct gr_arg);
80641+
80642+ return 0;
80643+}
80644+
80645+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
80646+{
80647+ struct gr_arg_compat argcompat;
80648+
80649+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
80650+ return -EFAULT;
80651+
80652+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
80653+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
80654+ arg->role_db.num_roles = argcompat.role_db.num_roles;
80655+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
80656+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
80657+ arg->role_db.num_objects = argcompat.role_db.num_objects;
80658+
80659+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
80660+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
80661+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
80662+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
80663+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
80664+ arg->segv_device = argcompat.segv_device;
80665+ arg->segv_inode = argcompat.segv_inode;
80666+ arg->segv_uid = argcompat.segv_uid;
80667+ arg->num_sprole_pws = argcompat.num_sprole_pws;
80668+ arg->mode = argcompat.mode;
80669+
80670+ return 0;
80671+}
80672+
80673+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
80674+{
80675+ struct acl_object_label_compat objcompat;
80676+
80677+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
80678+ return -EFAULT;
80679+
80680+ obj->filename = compat_ptr(objcompat.filename);
80681+ obj->inode = objcompat.inode;
80682+ obj->device = objcompat.device;
80683+ obj->mode = objcompat.mode;
80684+
80685+ obj->nested = compat_ptr(objcompat.nested);
80686+ obj->globbed = compat_ptr(objcompat.globbed);
80687+
80688+ obj->prev = compat_ptr(objcompat.prev);
80689+ obj->next = compat_ptr(objcompat.next);
80690+
80691+ return 0;
80692+}
80693+
80694+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
80695+{
80696+ unsigned int i;
80697+ struct acl_subject_label_compat subjcompat;
80698+
80699+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
80700+ return -EFAULT;
80701+
80702+ subj->filename = compat_ptr(subjcompat.filename);
80703+ subj->inode = subjcompat.inode;
80704+ subj->device = subjcompat.device;
80705+ subj->mode = subjcompat.mode;
80706+ subj->cap_mask = subjcompat.cap_mask;
80707+ subj->cap_lower = subjcompat.cap_lower;
80708+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
80709+
80710+ for (i = 0; i < GR_NLIMITS; i++) {
80711+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
80712+ subj->res[i].rlim_cur = RLIM_INFINITY;
80713+ else
80714+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
80715+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
80716+ subj->res[i].rlim_max = RLIM_INFINITY;
80717+ else
80718+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
80719+ }
80720+ subj->resmask = subjcompat.resmask;
80721+
80722+ subj->user_trans_type = subjcompat.user_trans_type;
80723+ subj->group_trans_type = subjcompat.group_trans_type;
80724+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
80725+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
80726+ subj->user_trans_num = subjcompat.user_trans_num;
80727+ subj->group_trans_num = subjcompat.group_trans_num;
80728+
80729+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
80730+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
80731+ subj->ip_type = subjcompat.ip_type;
80732+ subj->ips = compat_ptr(subjcompat.ips);
80733+ subj->ip_num = subjcompat.ip_num;
80734+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
80735+
80736+ subj->crashes = subjcompat.crashes;
80737+ subj->expires = subjcompat.expires;
80738+
80739+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
80740+ subj->hash = compat_ptr(subjcompat.hash);
80741+ subj->prev = compat_ptr(subjcompat.prev);
80742+ subj->next = compat_ptr(subjcompat.next);
80743+
80744+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
80745+ subj->obj_hash_size = subjcompat.obj_hash_size;
80746+ subj->pax_flags = subjcompat.pax_flags;
80747+
80748+ return 0;
80749+}
80750+
80751+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
80752+{
80753+ struct acl_role_label_compat rolecompat;
80754+
80755+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
80756+ return -EFAULT;
80757+
80758+ role->rolename = compat_ptr(rolecompat.rolename);
80759+ role->uidgid = rolecompat.uidgid;
80760+ role->roletype = rolecompat.roletype;
80761+
80762+ role->auth_attempts = rolecompat.auth_attempts;
80763+ role->expires = rolecompat.expires;
80764+
80765+ role->root_label = compat_ptr(rolecompat.root_label);
80766+ role->hash = compat_ptr(rolecompat.hash);
80767+
80768+ role->prev = compat_ptr(rolecompat.prev);
80769+ role->next = compat_ptr(rolecompat.next);
80770+
80771+ role->transitions = compat_ptr(rolecompat.transitions);
80772+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
80773+ role->domain_children = compat_ptr(rolecompat.domain_children);
80774+ role->domain_child_num = rolecompat.domain_child_num;
80775+
80776+ role->umask = rolecompat.umask;
80777+
80778+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
80779+ role->subj_hash_size = rolecompat.subj_hash_size;
80780+
80781+ return 0;
80782+}
80783+
80784+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
80785+{
80786+ struct role_allowed_ip_compat roleip_compat;
80787+
80788+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
80789+ return -EFAULT;
80790+
80791+ roleip->addr = roleip_compat.addr;
80792+ roleip->netmask = roleip_compat.netmask;
80793+
80794+ roleip->prev = compat_ptr(roleip_compat.prev);
80795+ roleip->next = compat_ptr(roleip_compat.next);
80796+
80797+ return 0;
80798+}
80799+
80800+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
80801+{
80802+ struct role_transition_compat trans_compat;
80803+
80804+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
80805+ return -EFAULT;
80806+
80807+ trans->rolename = compat_ptr(trans_compat.rolename);
80808+
80809+ trans->prev = compat_ptr(trans_compat.prev);
80810+ trans->next = compat_ptr(trans_compat.next);
80811+
80812+ return 0;
80813+
80814+}
80815+
80816+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
80817+{
80818+ struct gr_hash_struct_compat hash_compat;
80819+
80820+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
80821+ return -EFAULT;
80822+
80823+ hash->table = compat_ptr(hash_compat.table);
80824+ hash->nametable = compat_ptr(hash_compat.nametable);
80825+ hash->first = compat_ptr(hash_compat.first);
80826+
80827+ hash->table_size = hash_compat.table_size;
80828+ hash->used_size = hash_compat.used_size;
80829+
80830+ hash->type = hash_compat.type;
80831+
80832+ return 0;
80833+}
80834+
80835+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
80836+{
80837+ compat_uptr_t ptrcompat;
80838+
80839+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
80840+ return -EFAULT;
80841+
80842+ *(void **)ptr = compat_ptr(ptrcompat);
80843+
80844+ return 0;
80845+}
80846+
80847+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
80848+{
80849+ struct acl_ip_label_compat ip_compat;
80850+
80851+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
80852+ return -EFAULT;
80853+
80854+ ip->iface = compat_ptr(ip_compat.iface);
80855+ ip->addr = ip_compat.addr;
80856+ ip->netmask = ip_compat.netmask;
80857+ ip->low = ip_compat.low;
80858+ ip->high = ip_compat.high;
80859+ ip->mode = ip_compat.mode;
80860+ ip->type = ip_compat.type;
80861+
80862+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
80863+
80864+ ip->prev = compat_ptr(ip_compat.prev);
80865+ ip->next = compat_ptr(ip_compat.next);
80866+
80867+ return 0;
80868+}
80869+
80870+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
80871+{
80872+ struct sprole_pw_compat pw_compat;
80873+
80874+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
80875+ return -EFAULT;
80876+
80877+ pw->rolename = compat_ptr(pw_compat.rolename);
80878+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
80879+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
80880+
80881+ return 0;
80882+}
80883+
80884+size_t get_gr_arg_wrapper_size_compat(void)
80885+{
80886+ return sizeof(struct gr_arg_wrapper_compat);
80887+}
80888+
80889diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
80890new file mode 100644
80891index 0000000..8ee8e4f
80892--- /dev/null
80893+++ b/grsecurity/gracl_fs.c
80894@@ -0,0 +1,447 @@
80895+#include <linux/kernel.h>
80896+#include <linux/sched.h>
80897+#include <linux/types.h>
80898+#include <linux/fs.h>
80899+#include <linux/file.h>
80900+#include <linux/stat.h>
80901+#include <linux/grsecurity.h>
80902+#include <linux/grinternal.h>
80903+#include <linux/gracl.h>
80904+
80905+umode_t
80906+gr_acl_umask(void)
80907+{
80908+ if (unlikely(!gr_acl_is_enabled()))
80909+ return 0;
80910+
80911+ return current->role->umask;
80912+}
80913+
80914+__u32
80915+gr_acl_handle_hidden_file(const struct dentry * dentry,
80916+ const struct vfsmount * mnt)
80917+{
80918+ __u32 mode;
80919+
80920+ if (unlikely(d_is_negative(dentry)))
80921+ return GR_FIND;
80922+
80923+ mode =
80924+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
80925+
80926+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
80927+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
80928+ return mode;
80929+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
80930+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
80931+ return 0;
80932+ } else if (unlikely(!(mode & GR_FIND)))
80933+ return 0;
80934+
80935+ return GR_FIND;
80936+}
80937+
80938+__u32
80939+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
80940+ int acc_mode)
80941+{
80942+ __u32 reqmode = GR_FIND;
80943+ __u32 mode;
80944+
80945+ if (unlikely(d_is_negative(dentry)))
80946+ return reqmode;
80947+
80948+ if (acc_mode & MAY_APPEND)
80949+ reqmode |= GR_APPEND;
80950+ else if (acc_mode & MAY_WRITE)
80951+ reqmode |= GR_WRITE;
80952+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
80953+ reqmode |= GR_READ;
80954+
80955+ mode =
80956+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
80957+ mnt);
80958+
80959+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
80960+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
80961+ reqmode & GR_READ ? " reading" : "",
80962+ reqmode & GR_WRITE ? " writing" : reqmode &
80963+ GR_APPEND ? " appending" : "");
80964+ return reqmode;
80965+ } else
80966+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
80967+ {
80968+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
80969+ reqmode & GR_READ ? " reading" : "",
80970+ reqmode & GR_WRITE ? " writing" : reqmode &
80971+ GR_APPEND ? " appending" : "");
80972+ return 0;
80973+ } else if (unlikely((mode & reqmode) != reqmode))
80974+ return 0;
80975+
80976+ return reqmode;
80977+}
80978+
80979+__u32
80980+gr_acl_handle_creat(const struct dentry * dentry,
80981+ const struct dentry * p_dentry,
80982+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
80983+ const int imode)
80984+{
80985+ __u32 reqmode = GR_WRITE | GR_CREATE;
80986+ __u32 mode;
80987+
80988+ if (acc_mode & MAY_APPEND)
80989+ reqmode |= GR_APPEND;
80990+ // if a directory was required or the directory already exists, then
80991+ // don't count this open as a read
80992+ if ((acc_mode & MAY_READ) &&
80993+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
80994+ reqmode |= GR_READ;
80995+ if ((open_flags & O_CREAT) &&
80996+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
80997+ reqmode |= GR_SETID;
80998+
80999+ mode =
81000+ gr_check_create(dentry, p_dentry, p_mnt,
81001+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
81002+
81003+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
81004+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
81005+ reqmode & GR_READ ? " reading" : "",
81006+ reqmode & GR_WRITE ? " writing" : reqmode &
81007+ GR_APPEND ? " appending" : "");
81008+ return reqmode;
81009+ } else
81010+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
81011+ {
81012+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
81013+ reqmode & GR_READ ? " reading" : "",
81014+ reqmode & GR_WRITE ? " writing" : reqmode &
81015+ GR_APPEND ? " appending" : "");
81016+ return 0;
81017+ } else if (unlikely((mode & reqmode) != reqmode))
81018+ return 0;
81019+
81020+ return reqmode;
81021+}
81022+
81023+__u32
81024+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
81025+ const int fmode)
81026+{
81027+ __u32 mode, reqmode = GR_FIND;
81028+
81029+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
81030+ reqmode |= GR_EXEC;
81031+ if (fmode & S_IWOTH)
81032+ reqmode |= GR_WRITE;
81033+ if (fmode & S_IROTH)
81034+ reqmode |= GR_READ;
81035+
81036+ mode =
81037+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
81038+ mnt);
81039+
81040+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
81041+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
81042+ reqmode & GR_READ ? " reading" : "",
81043+ reqmode & GR_WRITE ? " writing" : "",
81044+ reqmode & GR_EXEC ? " executing" : "");
81045+ return reqmode;
81046+ } else
81047+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
81048+ {
81049+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
81050+ reqmode & GR_READ ? " reading" : "",
81051+ reqmode & GR_WRITE ? " writing" : "",
81052+ reqmode & GR_EXEC ? " executing" : "");
81053+ return 0;
81054+ } else if (unlikely((mode & reqmode) != reqmode))
81055+ return 0;
81056+
81057+ return reqmode;
81058+}
81059+
81060+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
81061+{
81062+ __u32 mode;
81063+
81064+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
81065+
81066+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
81067+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
81068+ return mode;
81069+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
81070+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
81071+ return 0;
81072+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
81073+ return 0;
81074+
81075+ return (reqmode);
81076+}
81077+
81078+__u32
81079+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
81080+{
81081+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
81082+}
81083+
81084+__u32
81085+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
81086+{
81087+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
81088+}
81089+
81090+__u32
81091+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
81092+{
81093+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
81094+}
81095+
81096+__u32
81097+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
81098+{
81099+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
81100+}
81101+
81102+__u32
81103+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
81104+ umode_t *modeptr)
81105+{
81106+ umode_t mode;
81107+
81108+ *modeptr &= ~gr_acl_umask();
81109+ mode = *modeptr;
81110+
81111+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
81112+ return 1;
81113+
81114+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
81115+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
81116+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
81117+ GR_CHMOD_ACL_MSG);
81118+ } else {
81119+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
81120+ }
81121+}
81122+
81123+__u32
81124+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
81125+{
81126+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
81127+}
81128+
81129+__u32
81130+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
81131+{
81132+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
81133+}
81134+
81135+__u32
81136+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
81137+{
81138+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
81139+}
81140+
81141+__u32
81142+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
81143+{
81144+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
81145+}
81146+
81147+__u32
81148+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
81149+{
81150+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
81151+ GR_UNIXCONNECT_ACL_MSG);
81152+}
81153+
81154+/* hardlinks require at minimum create and link permission,
81155+ any additional privilege required is based on the
81156+ privilege of the file being linked to
81157+*/
81158+__u32
81159+gr_acl_handle_link(const struct dentry * new_dentry,
81160+ const struct dentry * parent_dentry,
81161+ const struct vfsmount * parent_mnt,
81162+ const struct dentry * old_dentry,
81163+ const struct vfsmount * old_mnt, const struct filename *to)
81164+{
81165+ __u32 mode;
81166+ __u32 needmode = GR_CREATE | GR_LINK;
81167+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
81168+
81169+ mode =
81170+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
81171+ old_mnt);
81172+
81173+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
81174+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
81175+ return mode;
81176+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
81177+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
81178+ return 0;
81179+ } else if (unlikely((mode & needmode) != needmode))
81180+ return 0;
81181+
81182+ return 1;
81183+}
81184+
81185+__u32
81186+gr_acl_handle_symlink(const struct dentry * new_dentry,
81187+ const struct dentry * parent_dentry,
81188+ const struct vfsmount * parent_mnt, const struct filename *from)
81189+{
81190+ __u32 needmode = GR_WRITE | GR_CREATE;
81191+ __u32 mode;
81192+
81193+ mode =
81194+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
81195+ GR_CREATE | GR_AUDIT_CREATE |
81196+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
81197+
81198+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
81199+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
81200+ return mode;
81201+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
81202+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
81203+ return 0;
81204+ } else if (unlikely((mode & needmode) != needmode))
81205+ return 0;
81206+
81207+ return (GR_WRITE | GR_CREATE);
81208+}
81209+
81210+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
81211+{
81212+ __u32 mode;
81213+
81214+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
81215+
81216+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
81217+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
81218+ return mode;
81219+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
81220+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
81221+ return 0;
81222+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
81223+ return 0;
81224+
81225+ return (reqmode);
81226+}
81227+
81228+__u32
81229+gr_acl_handle_mknod(const struct dentry * new_dentry,
81230+ const struct dentry * parent_dentry,
81231+ const struct vfsmount * parent_mnt,
81232+ const int mode)
81233+{
81234+ __u32 reqmode = GR_WRITE | GR_CREATE;
81235+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
81236+ reqmode |= GR_SETID;
81237+
81238+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
81239+ reqmode, GR_MKNOD_ACL_MSG);
81240+}
81241+
81242+__u32
81243+gr_acl_handle_mkdir(const struct dentry *new_dentry,
81244+ const struct dentry *parent_dentry,
81245+ const struct vfsmount *parent_mnt)
81246+{
81247+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
81248+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
81249+}
81250+
81251+#define RENAME_CHECK_SUCCESS(old, new) \
81252+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
81253+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
81254+
81255+int
81256+gr_acl_handle_rename(struct dentry *new_dentry,
81257+ struct dentry *parent_dentry,
81258+ const struct vfsmount *parent_mnt,
81259+ struct dentry *old_dentry,
81260+ struct inode *old_parent_inode,
81261+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
81262+{
81263+ __u32 comp1, comp2;
81264+ int error = 0;
81265+
81266+ if (unlikely(!gr_acl_is_enabled()))
81267+ return 0;
81268+
81269+ if (flags & RENAME_EXCHANGE) {
81270+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
81271+ GR_AUDIT_READ | GR_AUDIT_WRITE |
81272+ GR_SUPPRESS, parent_mnt);
81273+ comp2 =
81274+ gr_search_file(old_dentry,
81275+ GR_READ | GR_WRITE | GR_AUDIT_READ |
81276+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
81277+ } else if (d_is_negative(new_dentry)) {
81278+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
81279+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
81280+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
81281+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
81282+ GR_DELETE | GR_AUDIT_DELETE |
81283+ GR_AUDIT_READ | GR_AUDIT_WRITE |
81284+ GR_SUPPRESS, old_mnt);
81285+ } else {
81286+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
81287+ GR_CREATE | GR_DELETE |
81288+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
81289+ GR_AUDIT_READ | GR_AUDIT_WRITE |
81290+ GR_SUPPRESS, parent_mnt);
81291+ comp2 =
81292+ gr_search_file(old_dentry,
81293+ GR_READ | GR_WRITE | GR_AUDIT_READ |
81294+ GR_DELETE | GR_AUDIT_DELETE |
81295+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
81296+ }
81297+
81298+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
81299+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
81300+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
81301+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
81302+ && !(comp2 & GR_SUPPRESS)) {
81303+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
81304+ error = -EACCES;
81305+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
81306+ error = -EACCES;
81307+
81308+ return error;
81309+}
81310+
81311+void
81312+gr_acl_handle_exit(void)
81313+{
81314+ u16 id;
81315+ char *rolename;
81316+
81317+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
81318+ !(current->role->roletype & GR_ROLE_PERSIST))) {
81319+ id = current->acl_role_id;
81320+ rolename = current->role->rolename;
81321+ gr_set_acls(1);
81322+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
81323+ }
81324+
81325+ gr_put_exec_file(current);
81326+ return;
81327+}
81328+
81329+int
81330+gr_acl_handle_procpidmem(const struct task_struct *task)
81331+{
81332+ if (unlikely(!gr_acl_is_enabled()))
81333+ return 0;
81334+
81335+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
81336+ !(current->acl->mode & GR_POVERRIDE) &&
81337+ !(current->role->roletype & GR_ROLE_GOD))
81338+ return -EACCES;
81339+
81340+ return 0;
81341+}
81342diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
81343new file mode 100644
81344index 0000000..f056b81
81345--- /dev/null
81346+++ b/grsecurity/gracl_ip.c
81347@@ -0,0 +1,386 @@
81348+#include <linux/kernel.h>
81349+#include <asm/uaccess.h>
81350+#include <asm/errno.h>
81351+#include <net/sock.h>
81352+#include <linux/file.h>
81353+#include <linux/fs.h>
81354+#include <linux/net.h>
81355+#include <linux/in.h>
81356+#include <linux/skbuff.h>
81357+#include <linux/ip.h>
81358+#include <linux/udp.h>
81359+#include <linux/types.h>
81360+#include <linux/sched.h>
81361+#include <linux/netdevice.h>
81362+#include <linux/inetdevice.h>
81363+#include <linux/gracl.h>
81364+#include <linux/grsecurity.h>
81365+#include <linux/grinternal.h>
81366+
81367+#define GR_BIND 0x01
81368+#define GR_CONNECT 0x02
81369+#define GR_INVERT 0x04
81370+#define GR_BINDOVERRIDE 0x08
81371+#define GR_CONNECTOVERRIDE 0x10
81372+#define GR_SOCK_FAMILY 0x20
81373+
81374+static const char * gr_protocols[IPPROTO_MAX] = {
81375+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
81376+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
81377+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
81378+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
81379+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
81380+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
81381+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
81382+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
81383+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
81384+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
81385+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
81386+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
81387+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
81388+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
81389+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
81390+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
81391+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
81392+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
81393+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
81394+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
81395+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
81396+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
81397+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
81398+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
81399+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
81400+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
81401+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
81402+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
81403+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
81404+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
81405+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
81406+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
81407+ };
81408+
81409+static const char * gr_socktypes[SOCK_MAX] = {
81410+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
81411+ "unknown:7", "unknown:8", "unknown:9", "packet"
81412+ };
81413+
81414+static const char * gr_sockfamilies[AF_MAX+1] = {
81415+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
81416+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
81417+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
81418+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
81419+ };
81420+
81421+const char *
81422+gr_proto_to_name(unsigned char proto)
81423+{
81424+ return gr_protocols[proto];
81425+}
81426+
81427+const char *
81428+gr_socktype_to_name(unsigned char type)
81429+{
81430+ return gr_socktypes[type];
81431+}
81432+
81433+const char *
81434+gr_sockfamily_to_name(unsigned char family)
81435+{
81436+ return gr_sockfamilies[family];
81437+}
81438+
81439+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
81440+
81441+int
81442+gr_search_socket(const int domain, const int type, const int protocol)
81443+{
81444+ struct acl_subject_label *curr;
81445+ const struct cred *cred = current_cred();
81446+
81447+ if (unlikely(!gr_acl_is_enabled()))
81448+ goto exit;
81449+
81450+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
81451+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
81452+ goto exit; // let the kernel handle it
81453+
81454+ curr = current->acl;
81455+
81456+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
81457+ /* the family is allowed, if this is PF_INET allow it only if
81458+ the extra sock type/protocol checks pass */
81459+ if (domain == PF_INET)
81460+ goto inet_check;
81461+ goto exit;
81462+ } else {
81463+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
81464+ __u32 fakeip = 0;
81465+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81466+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81467+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81468+ gr_to_filename(current->exec_file->f_path.dentry,
81469+ current->exec_file->f_path.mnt) :
81470+ curr->filename, curr->filename,
81471+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
81472+ &current->signal->saved_ip);
81473+ goto exit;
81474+ }
81475+ goto exit_fail;
81476+ }
81477+
81478+inet_check:
81479+ /* the rest of this checking is for IPv4 only */
81480+ if (!curr->ips)
81481+ goto exit;
81482+
81483+ if ((curr->ip_type & (1U << type)) &&
81484+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
81485+ goto exit;
81486+
81487+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
81488+ /* we don't place acls on raw sockets , and sometimes
81489+ dgram/ip sockets are opened for ioctl and not
81490+ bind/connect, so we'll fake a bind learn log */
81491+ if (type == SOCK_RAW || type == SOCK_PACKET) {
81492+ __u32 fakeip = 0;
81493+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81494+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81495+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81496+ gr_to_filename(current->exec_file->f_path.dentry,
81497+ current->exec_file->f_path.mnt) :
81498+ curr->filename, curr->filename,
81499+ &fakeip, 0, type,
81500+ protocol, GR_CONNECT, &current->signal->saved_ip);
81501+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
81502+ __u32 fakeip = 0;
81503+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81504+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81505+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81506+ gr_to_filename(current->exec_file->f_path.dentry,
81507+ current->exec_file->f_path.mnt) :
81508+ curr->filename, curr->filename,
81509+ &fakeip, 0, type,
81510+ protocol, GR_BIND, &current->signal->saved_ip);
81511+ }
81512+ /* we'll log when they use connect or bind */
81513+ goto exit;
81514+ }
81515+
81516+exit_fail:
81517+ if (domain == PF_INET)
81518+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
81519+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
81520+ else if (rcu_access_pointer(net_families[domain]) != NULL)
81521+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
81522+ gr_socktype_to_name(type), protocol);
81523+
81524+ return 0;
81525+exit:
81526+ return 1;
81527+}
81528+
81529+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
81530+{
81531+ if ((ip->mode & mode) &&
81532+ (ip_port >= ip->low) &&
81533+ (ip_port <= ip->high) &&
81534+ ((ntohl(ip_addr) & our_netmask) ==
81535+ (ntohl(our_addr) & our_netmask))
81536+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
81537+ && (ip->type & (1U << type))) {
81538+ if (ip->mode & GR_INVERT)
81539+ return 2; // specifically denied
81540+ else
81541+ return 1; // allowed
81542+ }
81543+
81544+ return 0; // not specifically allowed, may continue parsing
81545+}
81546+
81547+static int
81548+gr_search_connectbind(const int full_mode, struct sock *sk,
81549+ struct sockaddr_in *addr, const int type)
81550+{
81551+ char iface[IFNAMSIZ] = {0};
81552+ struct acl_subject_label *curr;
81553+ struct acl_ip_label *ip;
81554+ struct inet_sock *isk;
81555+ struct net_device *dev;
81556+ struct in_device *idev;
81557+ unsigned long i;
81558+ int ret;
81559+ int mode = full_mode & (GR_BIND | GR_CONNECT);
81560+ __u32 ip_addr = 0;
81561+ __u32 our_addr;
81562+ __u32 our_netmask;
81563+ char *p;
81564+ __u16 ip_port = 0;
81565+ const struct cred *cred = current_cred();
81566+
81567+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
81568+ return 0;
81569+
81570+ curr = current->acl;
81571+ isk = inet_sk(sk);
81572+
81573+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
81574+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
81575+ addr->sin_addr.s_addr = curr->inaddr_any_override;
81576+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
81577+ struct sockaddr_in saddr;
81578+ int err;
81579+
81580+ saddr.sin_family = AF_INET;
81581+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
81582+ saddr.sin_port = isk->inet_sport;
81583+
81584+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
81585+ if (err)
81586+ return err;
81587+
81588+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
81589+ if (err)
81590+ return err;
81591+ }
81592+
81593+ if (!curr->ips)
81594+ return 0;
81595+
81596+ ip_addr = addr->sin_addr.s_addr;
81597+ ip_port = ntohs(addr->sin_port);
81598+
81599+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
81600+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81601+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81602+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81603+ gr_to_filename(current->exec_file->f_path.dentry,
81604+ current->exec_file->f_path.mnt) :
81605+ curr->filename, curr->filename,
81606+ &ip_addr, ip_port, type,
81607+ sk->sk_protocol, mode, &current->signal->saved_ip);
81608+ return 0;
81609+ }
81610+
81611+ for (i = 0; i < curr->ip_num; i++) {
81612+ ip = *(curr->ips + i);
81613+ if (ip->iface != NULL) {
81614+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
81615+ p = strchr(iface, ':');
81616+ if (p != NULL)
81617+ *p = '\0';
81618+ dev = dev_get_by_name(sock_net(sk), iface);
81619+ if (dev == NULL)
81620+ continue;
81621+ idev = in_dev_get(dev);
81622+ if (idev == NULL) {
81623+ dev_put(dev);
81624+ continue;
81625+ }
81626+ rcu_read_lock();
81627+ for_ifa(idev) {
81628+ if (!strcmp(ip->iface, ifa->ifa_label)) {
81629+ our_addr = ifa->ifa_address;
81630+ our_netmask = 0xffffffff;
81631+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
81632+ if (ret == 1) {
81633+ rcu_read_unlock();
81634+ in_dev_put(idev);
81635+ dev_put(dev);
81636+ return 0;
81637+ } else if (ret == 2) {
81638+ rcu_read_unlock();
81639+ in_dev_put(idev);
81640+ dev_put(dev);
81641+ goto denied;
81642+ }
81643+ }
81644+ } endfor_ifa(idev);
81645+ rcu_read_unlock();
81646+ in_dev_put(idev);
81647+ dev_put(dev);
81648+ } else {
81649+ our_addr = ip->addr;
81650+ our_netmask = ip->netmask;
81651+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
81652+ if (ret == 1)
81653+ return 0;
81654+ else if (ret == 2)
81655+ goto denied;
81656+ }
81657+ }
81658+
81659+denied:
81660+ if (mode == GR_BIND)
81661+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
81662+ else if (mode == GR_CONNECT)
81663+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
81664+
81665+ return -EACCES;
81666+}
81667+
81668+int
81669+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
81670+{
81671+ /* always allow disconnection of dgram sockets with connect */
81672+ if (addr->sin_family == AF_UNSPEC)
81673+ return 0;
81674+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
81675+}
81676+
81677+int
81678+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
81679+{
81680+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
81681+}
81682+
81683+int gr_search_listen(struct socket *sock)
81684+{
81685+ struct sock *sk = sock->sk;
81686+ struct sockaddr_in addr;
81687+
81688+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
81689+ addr.sin_port = inet_sk(sk)->inet_sport;
81690+
81691+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
81692+}
81693+
81694+int gr_search_accept(struct socket *sock)
81695+{
81696+ struct sock *sk = sock->sk;
81697+ struct sockaddr_in addr;
81698+
81699+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
81700+ addr.sin_port = inet_sk(sk)->inet_sport;
81701+
81702+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
81703+}
81704+
81705+int
81706+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
81707+{
81708+ if (addr)
81709+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
81710+ else {
81711+ struct sockaddr_in sin;
81712+ const struct inet_sock *inet = inet_sk(sk);
81713+
81714+ sin.sin_addr.s_addr = inet->inet_daddr;
81715+ sin.sin_port = inet->inet_dport;
81716+
81717+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
81718+ }
81719+}
81720+
81721+int
81722+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
81723+{
81724+ struct sockaddr_in sin;
81725+
81726+ if (unlikely(skb->len < sizeof (struct udphdr)))
81727+ return 0; // skip this packet
81728+
81729+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
81730+ sin.sin_port = udp_hdr(skb)->source;
81731+
81732+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
81733+}
81734diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
81735new file mode 100644
81736index 0000000..25f54ef
81737--- /dev/null
81738+++ b/grsecurity/gracl_learn.c
81739@@ -0,0 +1,207 @@
81740+#include <linux/kernel.h>
81741+#include <linux/mm.h>
81742+#include <linux/sched.h>
81743+#include <linux/poll.h>
81744+#include <linux/string.h>
81745+#include <linux/file.h>
81746+#include <linux/types.h>
81747+#include <linux/vmalloc.h>
81748+#include <linux/grinternal.h>
81749+
81750+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
81751+ size_t count, loff_t *ppos);
81752+extern int gr_acl_is_enabled(void);
81753+
81754+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
81755+static int gr_learn_attached;
81756+
81757+/* use a 512k buffer */
81758+#define LEARN_BUFFER_SIZE (512 * 1024)
81759+
81760+static DEFINE_SPINLOCK(gr_learn_lock);
81761+static DEFINE_MUTEX(gr_learn_user_mutex);
81762+
81763+/* we need to maintain two buffers, so that the kernel context of grlearn
81764+ uses a semaphore around the userspace copying, and the other kernel contexts
81765+ use a spinlock when copying into the buffer, since they cannot sleep
81766+*/
81767+static char *learn_buffer;
81768+static char *learn_buffer_user;
81769+static int learn_buffer_len;
81770+static int learn_buffer_user_len;
81771+
81772+static ssize_t
81773+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
81774+{
81775+ DECLARE_WAITQUEUE(wait, current);
81776+ ssize_t retval = 0;
81777+
81778+ add_wait_queue(&learn_wait, &wait);
81779+ set_current_state(TASK_INTERRUPTIBLE);
81780+ do {
81781+ mutex_lock(&gr_learn_user_mutex);
81782+ spin_lock(&gr_learn_lock);
81783+ if (learn_buffer_len)
81784+ break;
81785+ spin_unlock(&gr_learn_lock);
81786+ mutex_unlock(&gr_learn_user_mutex);
81787+ if (file->f_flags & O_NONBLOCK) {
81788+ retval = -EAGAIN;
81789+ goto out;
81790+ }
81791+ if (signal_pending(current)) {
81792+ retval = -ERESTARTSYS;
81793+ goto out;
81794+ }
81795+
81796+ schedule();
81797+ } while (1);
81798+
81799+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
81800+ learn_buffer_user_len = learn_buffer_len;
81801+ retval = learn_buffer_len;
81802+ learn_buffer_len = 0;
81803+
81804+ spin_unlock(&gr_learn_lock);
81805+
81806+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
81807+ retval = -EFAULT;
81808+
81809+ mutex_unlock(&gr_learn_user_mutex);
81810+out:
81811+ set_current_state(TASK_RUNNING);
81812+ remove_wait_queue(&learn_wait, &wait);
81813+ return retval;
81814+}
81815+
81816+static unsigned int
81817+poll_learn(struct file * file, poll_table * wait)
81818+{
81819+ poll_wait(file, &learn_wait, wait);
81820+
81821+ if (learn_buffer_len)
81822+ return (POLLIN | POLLRDNORM);
81823+
81824+ return 0;
81825+}
81826+
81827+void
81828+gr_clear_learn_entries(void)
81829+{
81830+ char *tmp;
81831+
81832+ mutex_lock(&gr_learn_user_mutex);
81833+ spin_lock(&gr_learn_lock);
81834+ tmp = learn_buffer;
81835+ learn_buffer = NULL;
81836+ spin_unlock(&gr_learn_lock);
81837+ if (tmp)
81838+ vfree(tmp);
81839+ if (learn_buffer_user != NULL) {
81840+ vfree(learn_buffer_user);
81841+ learn_buffer_user = NULL;
81842+ }
81843+ learn_buffer_len = 0;
81844+ mutex_unlock(&gr_learn_user_mutex);
81845+
81846+ return;
81847+}
81848+
81849+void
81850+gr_add_learn_entry(const char *fmt, ...)
81851+{
81852+ va_list args;
81853+ unsigned int len;
81854+
81855+ if (!gr_learn_attached)
81856+ return;
81857+
81858+ spin_lock(&gr_learn_lock);
81859+
81860+ /* leave a gap at the end so we know when it's "full" but don't have to
81861+ compute the exact length of the string we're trying to append
81862+ */
81863+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
81864+ spin_unlock(&gr_learn_lock);
81865+ wake_up_interruptible(&learn_wait);
81866+ return;
81867+ }
81868+ if (learn_buffer == NULL) {
81869+ spin_unlock(&gr_learn_lock);
81870+ return;
81871+ }
81872+
81873+ va_start(args, fmt);
81874+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
81875+ va_end(args);
81876+
81877+ learn_buffer_len += len + 1;
81878+
81879+ spin_unlock(&gr_learn_lock);
81880+ wake_up_interruptible(&learn_wait);
81881+
81882+ return;
81883+}
81884+
81885+static int
81886+open_learn(struct inode *inode, struct file *file)
81887+{
81888+ if (file->f_mode & FMODE_READ && gr_learn_attached)
81889+ return -EBUSY;
81890+ if (file->f_mode & FMODE_READ) {
81891+ int retval = 0;
81892+ mutex_lock(&gr_learn_user_mutex);
81893+ if (learn_buffer == NULL)
81894+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
81895+ if (learn_buffer_user == NULL)
81896+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
81897+ if (learn_buffer == NULL) {
81898+ retval = -ENOMEM;
81899+ goto out_error;
81900+ }
81901+ if (learn_buffer_user == NULL) {
81902+ retval = -ENOMEM;
81903+ goto out_error;
81904+ }
81905+ learn_buffer_len = 0;
81906+ learn_buffer_user_len = 0;
81907+ gr_learn_attached = 1;
81908+out_error:
81909+ mutex_unlock(&gr_learn_user_mutex);
81910+ return retval;
81911+ }
81912+ return 0;
81913+}
81914+
81915+static int
81916+close_learn(struct inode *inode, struct file *file)
81917+{
81918+ if (file->f_mode & FMODE_READ) {
81919+ char *tmp = NULL;
81920+ mutex_lock(&gr_learn_user_mutex);
81921+ spin_lock(&gr_learn_lock);
81922+ tmp = learn_buffer;
81923+ learn_buffer = NULL;
81924+ spin_unlock(&gr_learn_lock);
81925+ if (tmp)
81926+ vfree(tmp);
81927+ if (learn_buffer_user != NULL) {
81928+ vfree(learn_buffer_user);
81929+ learn_buffer_user = NULL;
81930+ }
81931+ learn_buffer_len = 0;
81932+ learn_buffer_user_len = 0;
81933+ gr_learn_attached = 0;
81934+ mutex_unlock(&gr_learn_user_mutex);
81935+ }
81936+
81937+ return 0;
81938+}
81939+
81940+const struct file_operations grsec_fops = {
81941+ .read = read_learn,
81942+ .write = write_grsec_handler,
81943+ .open = open_learn,
81944+ .release = close_learn,
81945+ .poll = poll_learn,
81946+};
81947diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
81948new file mode 100644
81949index 0000000..fd26052
81950--- /dev/null
81951+++ b/grsecurity/gracl_policy.c
81952@@ -0,0 +1,1781 @@
81953+#include <linux/kernel.h>
81954+#include <linux/module.h>
81955+#include <linux/sched.h>
81956+#include <linux/mm.h>
81957+#include <linux/file.h>
81958+#include <linux/fs.h>
81959+#include <linux/namei.h>
81960+#include <linux/mount.h>
81961+#include <linux/tty.h>
81962+#include <linux/proc_fs.h>
81963+#include <linux/lglock.h>
81964+#include <linux/slab.h>
81965+#include <linux/vmalloc.h>
81966+#include <linux/types.h>
81967+#include <linux/sysctl.h>
81968+#include <linux/netdevice.h>
81969+#include <linux/ptrace.h>
81970+#include <linux/gracl.h>
81971+#include <linux/gralloc.h>
81972+#include <linux/security.h>
81973+#include <linux/grinternal.h>
81974+#include <linux/pid_namespace.h>
81975+#include <linux/stop_machine.h>
81976+#include <linux/fdtable.h>
81977+#include <linux/percpu.h>
81978+#include <linux/lglock.h>
81979+#include <linux/hugetlb.h>
81980+#include <linux/posix-timers.h>
81981+#include "../fs/mount.h"
81982+
81983+#include <asm/uaccess.h>
81984+#include <asm/errno.h>
81985+#include <asm/mman.h>
81986+
81987+extern struct gr_policy_state *polstate;
81988+
81989+#define FOR_EACH_ROLE_START(role) \
81990+ role = polstate->role_list; \
81991+ while (role) {
81992+
81993+#define FOR_EACH_ROLE_END(role) \
81994+ role = role->prev; \
81995+ }
81996+
81997+struct path gr_real_root;
81998+
81999+extern struct gr_alloc_state *current_alloc_state;
82000+
82001+u16 acl_sp_role_value;
82002+
82003+static DEFINE_MUTEX(gr_dev_mutex);
82004+
82005+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
82006+extern void gr_clear_learn_entries(void);
82007+
82008+struct gr_arg *gr_usermode __read_only;
82009+unsigned char *gr_system_salt __read_only;
82010+unsigned char *gr_system_sum __read_only;
82011+
82012+static unsigned int gr_auth_attempts = 0;
82013+static unsigned long gr_auth_expires = 0UL;
82014+
82015+struct acl_object_label *fakefs_obj_rw;
82016+struct acl_object_label *fakefs_obj_rwx;
82017+
82018+extern int gr_init_uidset(void);
82019+extern void gr_free_uidset(void);
82020+extern void gr_remove_uid(uid_t uid);
82021+extern int gr_find_uid(uid_t uid);
82022+
82023+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
82024+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
82025+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
82026+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
82027+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
82028+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
82029+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
82030+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
82031+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
82032+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
82033+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
82034+extern void assign_special_role(const char *rolename);
82035+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
82036+extern int gr_rbac_disable(void *unused);
82037+extern void gr_enable_rbac_system(void);
82038+
82039+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
82040+{
82041+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
82042+ return -EFAULT;
82043+
82044+ return 0;
82045+}
82046+
82047+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
82048+{
82049+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
82050+ return -EFAULT;
82051+
82052+ return 0;
82053+}
82054+
82055+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
82056+{
82057+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
82058+ return -EFAULT;
82059+
82060+ return 0;
82061+}
82062+
82063+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
82064+{
82065+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
82066+ return -EFAULT;
82067+
82068+ return 0;
82069+}
82070+
82071+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
82072+{
82073+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
82074+ return -EFAULT;
82075+
82076+ return 0;
82077+}
82078+
82079+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
82080+{
82081+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
82082+ return -EFAULT;
82083+
82084+ return 0;
82085+}
82086+
82087+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
82088+{
82089+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
82090+ return -EFAULT;
82091+
82092+ return 0;
82093+}
82094+
82095+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
82096+{
82097+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
82098+ return -EFAULT;
82099+
82100+ return 0;
82101+}
82102+
82103+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
82104+{
82105+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
82106+ return -EFAULT;
82107+
82108+ return 0;
82109+}
82110+
82111+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
82112+{
82113+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
82114+ return -EFAULT;
82115+
82116+ if ((uwrap->version != GRSECURITY_VERSION) ||
82117+ (uwrap->size != sizeof(struct gr_arg)))
82118+ return -EINVAL;
82119+
82120+ return 0;
82121+}
82122+
82123+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
82124+{
82125+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
82126+ return -EFAULT;
82127+
82128+ return 0;
82129+}
82130+
82131+static size_t get_gr_arg_wrapper_size_normal(void)
82132+{
82133+ return sizeof(struct gr_arg_wrapper);
82134+}
82135+
82136+#ifdef CONFIG_COMPAT
82137+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
82138+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
82139+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
82140+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
82141+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
82142+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
82143+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
82144+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
82145+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
82146+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
82147+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
82148+extern size_t get_gr_arg_wrapper_size_compat(void);
82149+
82150+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
82151+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
82152+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
82153+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
82154+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
82155+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
82156+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
82157+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
82158+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
82159+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
82160+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
82161+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
82162+
82163+#else
82164+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
82165+#define copy_gr_arg copy_gr_arg_normal
82166+#define copy_gr_hash_struct copy_gr_hash_struct_normal
82167+#define copy_acl_object_label copy_acl_object_label_normal
82168+#define copy_acl_subject_label copy_acl_subject_label_normal
82169+#define copy_acl_role_label copy_acl_role_label_normal
82170+#define copy_acl_ip_label copy_acl_ip_label_normal
82171+#define copy_pointer_from_array copy_pointer_from_array_normal
82172+#define copy_sprole_pw copy_sprole_pw_normal
82173+#define copy_role_transition copy_role_transition_normal
82174+#define copy_role_allowed_ip copy_role_allowed_ip_normal
82175+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
82176+#endif
82177+
82178+static struct acl_subject_label *
82179+lookup_subject_map(const struct acl_subject_label *userp)
82180+{
82181+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
82182+ struct subject_map *match;
82183+
82184+ match = polstate->subj_map_set.s_hash[index];
82185+
82186+ while (match && match->user != userp)
82187+ match = match->next;
82188+
82189+ if (match != NULL)
82190+ return match->kernel;
82191+ else
82192+ return NULL;
82193+}
82194+
82195+static void
82196+insert_subj_map_entry(struct subject_map *subjmap)
82197+{
82198+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
82199+ struct subject_map **curr;
82200+
82201+ subjmap->prev = NULL;
82202+
82203+ curr = &polstate->subj_map_set.s_hash[index];
82204+ if (*curr != NULL)
82205+ (*curr)->prev = subjmap;
82206+
82207+ subjmap->next = *curr;
82208+ *curr = subjmap;
82209+
82210+ return;
82211+}
82212+
82213+static void
82214+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
82215+{
82216+ unsigned int index =
82217+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
82218+ struct acl_role_label **curr;
82219+ struct acl_role_label *tmp, *tmp2;
82220+
82221+ curr = &polstate->acl_role_set.r_hash[index];
82222+
82223+ /* simple case, slot is empty, just set it to our role */
82224+ if (*curr == NULL) {
82225+ *curr = role;
82226+ } else {
82227+ /* example:
82228+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
82229+ 2 -> 3
82230+ */
82231+ /* first check to see if we can already be reached via this slot */
82232+ tmp = *curr;
82233+ while (tmp && tmp != role)
82234+ tmp = tmp->next;
82235+ if (tmp == role) {
82236+ /* we don't need to add ourselves to this slot's chain */
82237+ return;
82238+ }
82239+ /* we need to add ourselves to this chain, two cases */
82240+ if (role->next == NULL) {
82241+ /* simple case, append the current chain to our role */
82242+ role->next = *curr;
82243+ *curr = role;
82244+ } else {
82245+ /* 1 -> 2 -> 3 -> 4
82246+ 2 -> 3 -> 4
82247+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
82248+ */
82249+ /* trickier case: walk our role's chain until we find
82250+ the role for the start of the current slot's chain */
82251+ tmp = role;
82252+ tmp2 = *curr;
82253+ while (tmp->next && tmp->next != tmp2)
82254+ tmp = tmp->next;
82255+ if (tmp->next == tmp2) {
82256+ /* from example above, we found 3, so just
82257+ replace this slot's chain with ours */
82258+ *curr = role;
82259+ } else {
82260+ /* we didn't find a subset of our role's chain
82261+ in the current slot's chain, so append their
82262+ chain to ours, and set us as the first role in
82263+ the slot's chain
82264+
82265+ we could fold this case with the case above,
82266+ but making it explicit for clarity
82267+ */
82268+ tmp->next = tmp2;
82269+ *curr = role;
82270+ }
82271+ }
82272+ }
82273+
82274+ return;
82275+}
82276+
82277+static void
82278+insert_acl_role_label(struct acl_role_label *role)
82279+{
82280+ int i;
82281+
82282+ if (polstate->role_list == NULL) {
82283+ polstate->role_list = role;
82284+ role->prev = NULL;
82285+ } else {
82286+ role->prev = polstate->role_list;
82287+ polstate->role_list = role;
82288+ }
82289+
82290+ /* used for hash chains */
82291+ role->next = NULL;
82292+
82293+ if (role->roletype & GR_ROLE_DOMAIN) {
82294+ for (i = 0; i < role->domain_child_num; i++)
82295+ __insert_acl_role_label(role, role->domain_children[i]);
82296+ } else
82297+ __insert_acl_role_label(role, role->uidgid);
82298+}
82299+
82300+static int
82301+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
82302+{
82303+ struct name_entry **curr, *nentry;
82304+ struct inodev_entry *ientry;
82305+ unsigned int len = strlen(name);
82306+ unsigned int key = full_name_hash(name, len);
82307+ unsigned int index = key % polstate->name_set.n_size;
82308+
82309+ curr = &polstate->name_set.n_hash[index];
82310+
82311+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
82312+ curr = &((*curr)->next);
82313+
82314+ if (*curr != NULL)
82315+ return 1;
82316+
82317+ nentry = acl_alloc(sizeof (struct name_entry));
82318+ if (nentry == NULL)
82319+ return 0;
82320+ ientry = acl_alloc(sizeof (struct inodev_entry));
82321+ if (ientry == NULL)
82322+ return 0;
82323+ ientry->nentry = nentry;
82324+
82325+ nentry->key = key;
82326+ nentry->name = name;
82327+ nentry->inode = inode;
82328+ nentry->device = device;
82329+ nentry->len = len;
82330+ nentry->deleted = deleted;
82331+
82332+ nentry->prev = NULL;
82333+ curr = &polstate->name_set.n_hash[index];
82334+ if (*curr != NULL)
82335+ (*curr)->prev = nentry;
82336+ nentry->next = *curr;
82337+ *curr = nentry;
82338+
82339+ /* insert us into the table searchable by inode/dev */
82340+ __insert_inodev_entry(polstate, ientry);
82341+
82342+ return 1;
82343+}
82344+
82345+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
82346+
82347+static void *
82348+create_table(__u32 * len, int elementsize)
82349+{
82350+ unsigned int table_sizes[] = {
82351+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
82352+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
82353+ 4194301, 8388593, 16777213, 33554393, 67108859
82354+ };
82355+ void *newtable = NULL;
82356+ unsigned int pwr = 0;
82357+
82358+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
82359+ table_sizes[pwr] <= *len)
82360+ pwr++;
82361+
82362+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
82363+ return newtable;
82364+
82365+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
82366+ newtable =
82367+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
82368+ else
82369+ newtable = vmalloc(table_sizes[pwr] * elementsize);
82370+
82371+ *len = table_sizes[pwr];
82372+
82373+ return newtable;
82374+}
82375+
82376+static int
82377+init_variables(const struct gr_arg *arg, bool reload)
82378+{
82379+ struct task_struct *reaper = init_pid_ns.child_reaper;
82380+ unsigned int stacksize;
82381+
82382+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
82383+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
82384+ polstate->name_set.n_size = arg->role_db.num_objects;
82385+ polstate->inodev_set.i_size = arg->role_db.num_objects;
82386+
82387+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
82388+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
82389+ return 1;
82390+
82391+ if (!reload) {
82392+ if (!gr_init_uidset())
82393+ return 1;
82394+ }
82395+
82396+ /* set up the stack that holds allocation info */
82397+
82398+ stacksize = arg->role_db.num_pointers + 5;
82399+
82400+ if (!acl_alloc_stack_init(stacksize))
82401+ return 1;
82402+
82403+ if (!reload) {
82404+ /* grab reference for the real root dentry and vfsmount */
82405+ get_fs_root(reaper->fs, &gr_real_root);
82406+
82407+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
82408+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
82409+#endif
82410+
82411+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
82412+ if (fakefs_obj_rw == NULL)
82413+ return 1;
82414+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
82415+
82416+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
82417+ if (fakefs_obj_rwx == NULL)
82418+ return 1;
82419+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
82420+ }
82421+
82422+ polstate->subj_map_set.s_hash =
82423+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
82424+ polstate->acl_role_set.r_hash =
82425+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
82426+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
82427+ polstate->inodev_set.i_hash =
82428+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
82429+
82430+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
82431+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
82432+ return 1;
82433+
82434+ memset(polstate->subj_map_set.s_hash, 0,
82435+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
82436+ memset(polstate->acl_role_set.r_hash, 0,
82437+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
82438+ memset(polstate->name_set.n_hash, 0,
82439+ sizeof (struct name_entry *) * polstate->name_set.n_size);
82440+ memset(polstate->inodev_set.i_hash, 0,
82441+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
82442+
82443+ return 0;
82444+}
82445+
82446+/* free information not needed after startup
82447+ currently contains user->kernel pointer mappings for subjects
82448+*/
82449+
82450+static void
82451+free_init_variables(void)
82452+{
82453+ __u32 i;
82454+
82455+ if (polstate->subj_map_set.s_hash) {
82456+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
82457+ if (polstate->subj_map_set.s_hash[i]) {
82458+ kfree(polstate->subj_map_set.s_hash[i]);
82459+ polstate->subj_map_set.s_hash[i] = NULL;
82460+ }
82461+ }
82462+
82463+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
82464+ PAGE_SIZE)
82465+ kfree(polstate->subj_map_set.s_hash);
82466+ else
82467+ vfree(polstate->subj_map_set.s_hash);
82468+ }
82469+
82470+ return;
82471+}
82472+
82473+static void
82474+free_variables(bool reload)
82475+{
82476+ struct acl_subject_label *s;
82477+ struct acl_role_label *r;
82478+ struct task_struct *task, *task2;
82479+ unsigned int x;
82480+
82481+ if (!reload) {
82482+ gr_clear_learn_entries();
82483+
82484+ read_lock(&tasklist_lock);
82485+ do_each_thread(task2, task) {
82486+ task->acl_sp_role = 0;
82487+ task->acl_role_id = 0;
82488+ task->inherited = 0;
82489+ task->acl = NULL;
82490+ task->role = NULL;
82491+ } while_each_thread(task2, task);
82492+ read_unlock(&tasklist_lock);
82493+
82494+ kfree(fakefs_obj_rw);
82495+ fakefs_obj_rw = NULL;
82496+ kfree(fakefs_obj_rwx);
82497+ fakefs_obj_rwx = NULL;
82498+
82499+ /* release the reference to the real root dentry and vfsmount */
82500+ path_put(&gr_real_root);
82501+ memset(&gr_real_root, 0, sizeof(gr_real_root));
82502+ }
82503+
82504+ /* free all object hash tables */
82505+
82506+ FOR_EACH_ROLE_START(r)
82507+ if (r->subj_hash == NULL)
82508+ goto next_role;
82509+ FOR_EACH_SUBJECT_START(r, s, x)
82510+ if (s->obj_hash == NULL)
82511+ break;
82512+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
82513+ kfree(s->obj_hash);
82514+ else
82515+ vfree(s->obj_hash);
82516+ FOR_EACH_SUBJECT_END(s, x)
82517+ FOR_EACH_NESTED_SUBJECT_START(r, s)
82518+ if (s->obj_hash == NULL)
82519+ break;
82520+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
82521+ kfree(s->obj_hash);
82522+ else
82523+ vfree(s->obj_hash);
82524+ FOR_EACH_NESTED_SUBJECT_END(s)
82525+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
82526+ kfree(r->subj_hash);
82527+ else
82528+ vfree(r->subj_hash);
82529+ r->subj_hash = NULL;
82530+next_role:
82531+ FOR_EACH_ROLE_END(r)
82532+
82533+ acl_free_all();
82534+
82535+ if (polstate->acl_role_set.r_hash) {
82536+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
82537+ PAGE_SIZE)
82538+ kfree(polstate->acl_role_set.r_hash);
82539+ else
82540+ vfree(polstate->acl_role_set.r_hash);
82541+ }
82542+ if (polstate->name_set.n_hash) {
82543+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
82544+ PAGE_SIZE)
82545+ kfree(polstate->name_set.n_hash);
82546+ else
82547+ vfree(polstate->name_set.n_hash);
82548+ }
82549+
82550+ if (polstate->inodev_set.i_hash) {
82551+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
82552+ PAGE_SIZE)
82553+ kfree(polstate->inodev_set.i_hash);
82554+ else
82555+ vfree(polstate->inodev_set.i_hash);
82556+ }
82557+
82558+ if (!reload)
82559+ gr_free_uidset();
82560+
82561+ memset(&polstate->name_set, 0, sizeof (struct name_db));
82562+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
82563+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
82564+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
82565+
82566+ polstate->default_role = NULL;
82567+ polstate->kernel_role = NULL;
82568+ polstate->role_list = NULL;
82569+
82570+ return;
82571+}
82572+
82573+static struct acl_subject_label *
82574+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
82575+
82576+static int alloc_and_copy_string(char **name, unsigned int maxlen)
82577+{
82578+ unsigned int len = strnlen_user(*name, maxlen);
82579+ char *tmp;
82580+
82581+ if (!len || len >= maxlen)
82582+ return -EINVAL;
82583+
82584+ if ((tmp = (char *) acl_alloc(len)) == NULL)
82585+ return -ENOMEM;
82586+
82587+ if (copy_from_user(tmp, *name, len))
82588+ return -EFAULT;
82589+
82590+ tmp[len-1] = '\0';
82591+ *name = tmp;
82592+
82593+ return 0;
82594+}
82595+
82596+static int
82597+copy_user_glob(struct acl_object_label *obj)
82598+{
82599+ struct acl_object_label *g_tmp, **guser;
82600+ int error;
82601+
82602+ if (obj->globbed == NULL)
82603+ return 0;
82604+
82605+ guser = &obj->globbed;
82606+ while (*guser) {
82607+ g_tmp = (struct acl_object_label *)
82608+ acl_alloc(sizeof (struct acl_object_label));
82609+ if (g_tmp == NULL)
82610+ return -ENOMEM;
82611+
82612+ if (copy_acl_object_label(g_tmp, *guser))
82613+ return -EFAULT;
82614+
82615+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
82616+ if (error)
82617+ return error;
82618+
82619+ *guser = g_tmp;
82620+ guser = &(g_tmp->next);
82621+ }
82622+
82623+ return 0;
82624+}
82625+
82626+static int
82627+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
82628+ struct acl_role_label *role)
82629+{
82630+ struct acl_object_label *o_tmp;
82631+ int ret;
82632+
82633+ while (userp) {
82634+ if ((o_tmp = (struct acl_object_label *)
82635+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
82636+ return -ENOMEM;
82637+
82638+ if (copy_acl_object_label(o_tmp, userp))
82639+ return -EFAULT;
82640+
82641+ userp = o_tmp->prev;
82642+
82643+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
82644+ if (ret)
82645+ return ret;
82646+
82647+ insert_acl_obj_label(o_tmp, subj);
82648+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
82649+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
82650+ return -ENOMEM;
82651+
82652+ ret = copy_user_glob(o_tmp);
82653+ if (ret)
82654+ return ret;
82655+
82656+ if (o_tmp->nested) {
82657+ int already_copied;
82658+
82659+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
82660+ if (IS_ERR(o_tmp->nested))
82661+ return PTR_ERR(o_tmp->nested);
82662+
82663+ /* insert into nested subject list if we haven't copied this one yet
82664+ to prevent duplicate entries */
82665+ if (!already_copied) {
82666+ o_tmp->nested->next = role->hash->first;
82667+ role->hash->first = o_tmp->nested;
82668+ }
82669+ }
82670+ }
82671+
82672+ return 0;
82673+}
82674+
82675+static __u32
82676+count_user_subjs(struct acl_subject_label *userp)
82677+{
82678+ struct acl_subject_label s_tmp;
82679+ __u32 num = 0;
82680+
82681+ while (userp) {
82682+ if (copy_acl_subject_label(&s_tmp, userp))
82683+ break;
82684+
82685+ userp = s_tmp.prev;
82686+ }
82687+
82688+ return num;
82689+}
82690+
82691+static int
82692+copy_user_allowedips(struct acl_role_label *rolep)
82693+{
82694+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
82695+
82696+ ruserip = rolep->allowed_ips;
82697+
82698+ while (ruserip) {
82699+ rlast = rtmp;
82700+
82701+ if ((rtmp = (struct role_allowed_ip *)
82702+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
82703+ return -ENOMEM;
82704+
82705+ if (copy_role_allowed_ip(rtmp, ruserip))
82706+ return -EFAULT;
82707+
82708+ ruserip = rtmp->prev;
82709+
82710+ if (!rlast) {
82711+ rtmp->prev = NULL;
82712+ rolep->allowed_ips = rtmp;
82713+ } else {
82714+ rlast->next = rtmp;
82715+ rtmp->prev = rlast;
82716+ }
82717+
82718+ if (!ruserip)
82719+ rtmp->next = NULL;
82720+ }
82721+
82722+ return 0;
82723+}
82724+
82725+static int
82726+copy_user_transitions(struct acl_role_label *rolep)
82727+{
82728+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
82729+ int error;
82730+
82731+ rusertp = rolep->transitions;
82732+
82733+ while (rusertp) {
82734+ rlast = rtmp;
82735+
82736+ if ((rtmp = (struct role_transition *)
82737+ acl_alloc(sizeof (struct role_transition))) == NULL)
82738+ return -ENOMEM;
82739+
82740+ if (copy_role_transition(rtmp, rusertp))
82741+ return -EFAULT;
82742+
82743+ rusertp = rtmp->prev;
82744+
82745+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
82746+ if (error)
82747+ return error;
82748+
82749+ if (!rlast) {
82750+ rtmp->prev = NULL;
82751+ rolep->transitions = rtmp;
82752+ } else {
82753+ rlast->next = rtmp;
82754+ rtmp->prev = rlast;
82755+ }
82756+
82757+ if (!rusertp)
82758+ rtmp->next = NULL;
82759+ }
82760+
82761+ return 0;
82762+}
82763+
82764+static __u32 count_user_objs(const struct acl_object_label __user *userp)
82765+{
82766+ struct acl_object_label o_tmp;
82767+ __u32 num = 0;
82768+
82769+ while (userp) {
82770+ if (copy_acl_object_label(&o_tmp, userp))
82771+ break;
82772+
82773+ userp = o_tmp.prev;
82774+ num++;
82775+ }
82776+
82777+ return num;
82778+}
82779+
82780+static struct acl_subject_label *
82781+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
82782+{
82783+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
82784+ __u32 num_objs;
82785+ struct acl_ip_label **i_tmp, *i_utmp2;
82786+ struct gr_hash_struct ghash;
82787+ struct subject_map *subjmap;
82788+ unsigned int i_num;
82789+ int err;
82790+
82791+ if (already_copied != NULL)
82792+ *already_copied = 0;
82793+
82794+ s_tmp = lookup_subject_map(userp);
82795+
82796+ /* we've already copied this subject into the kernel, just return
82797+ the reference to it, and don't copy it over again
82798+ */
82799+ if (s_tmp) {
82800+ if (already_copied != NULL)
82801+ *already_copied = 1;
82802+ return(s_tmp);
82803+ }
82804+
82805+ if ((s_tmp = (struct acl_subject_label *)
82806+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
82807+ return ERR_PTR(-ENOMEM);
82808+
82809+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
82810+ if (subjmap == NULL)
82811+ return ERR_PTR(-ENOMEM);
82812+
82813+ subjmap->user = userp;
82814+ subjmap->kernel = s_tmp;
82815+ insert_subj_map_entry(subjmap);
82816+
82817+ if (copy_acl_subject_label(s_tmp, userp))
82818+ return ERR_PTR(-EFAULT);
82819+
82820+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
82821+ if (err)
82822+ return ERR_PTR(err);
82823+
82824+ if (!strcmp(s_tmp->filename, "/"))
82825+ role->root_label = s_tmp;
82826+
82827+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
82828+ return ERR_PTR(-EFAULT);
82829+
82830+ /* copy user and group transition tables */
82831+
82832+ if (s_tmp->user_trans_num) {
82833+ uid_t *uidlist;
82834+
82835+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
82836+ if (uidlist == NULL)
82837+ return ERR_PTR(-ENOMEM);
82838+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
82839+ return ERR_PTR(-EFAULT);
82840+
82841+ s_tmp->user_transitions = uidlist;
82842+ }
82843+
82844+ if (s_tmp->group_trans_num) {
82845+ gid_t *gidlist;
82846+
82847+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
82848+ if (gidlist == NULL)
82849+ return ERR_PTR(-ENOMEM);
82850+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
82851+ return ERR_PTR(-EFAULT);
82852+
82853+ s_tmp->group_transitions = gidlist;
82854+ }
82855+
82856+ /* set up object hash table */
82857+ num_objs = count_user_objs(ghash.first);
82858+
82859+ s_tmp->obj_hash_size = num_objs;
82860+ s_tmp->obj_hash =
82861+ (struct acl_object_label **)
82862+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
82863+
82864+ if (!s_tmp->obj_hash)
82865+ return ERR_PTR(-ENOMEM);
82866+
82867+ memset(s_tmp->obj_hash, 0,
82868+ s_tmp->obj_hash_size *
82869+ sizeof (struct acl_object_label *));
82870+
82871+ /* add in objects */
82872+ err = copy_user_objs(ghash.first, s_tmp, role);
82873+
82874+ if (err)
82875+ return ERR_PTR(err);
82876+
82877+ /* set pointer for parent subject */
82878+ if (s_tmp->parent_subject) {
82879+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
82880+
82881+ if (IS_ERR(s_tmp2))
82882+ return s_tmp2;
82883+
82884+ s_tmp->parent_subject = s_tmp2;
82885+ }
82886+
82887+ /* add in ip acls */
82888+
82889+ if (!s_tmp->ip_num) {
82890+ s_tmp->ips = NULL;
82891+ goto insert;
82892+ }
82893+
82894+ i_tmp =
82895+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
82896+ sizeof (struct acl_ip_label *));
82897+
82898+ if (!i_tmp)
82899+ return ERR_PTR(-ENOMEM);
82900+
82901+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
82902+ *(i_tmp + i_num) =
82903+ (struct acl_ip_label *)
82904+ acl_alloc(sizeof (struct acl_ip_label));
82905+ if (!*(i_tmp + i_num))
82906+ return ERR_PTR(-ENOMEM);
82907+
82908+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
82909+ return ERR_PTR(-EFAULT);
82910+
82911+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
82912+ return ERR_PTR(-EFAULT);
82913+
82914+ if ((*(i_tmp + i_num))->iface == NULL)
82915+ continue;
82916+
82917+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
82918+ if (err)
82919+ return ERR_PTR(err);
82920+ }
82921+
82922+ s_tmp->ips = i_tmp;
82923+
82924+insert:
82925+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
82926+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
82927+ return ERR_PTR(-ENOMEM);
82928+
82929+ return s_tmp;
82930+}
82931+
82932+static int
82933+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
82934+{
82935+ struct acl_subject_label s_pre;
82936+ struct acl_subject_label * ret;
82937+ int err;
82938+
82939+ while (userp) {
82940+ if (copy_acl_subject_label(&s_pre, userp))
82941+ return -EFAULT;
82942+
82943+ ret = do_copy_user_subj(userp, role, NULL);
82944+
82945+ err = PTR_ERR(ret);
82946+ if (IS_ERR(ret))
82947+ return err;
82948+
82949+ insert_acl_subj_label(ret, role);
82950+
82951+ userp = s_pre.prev;
82952+ }
82953+
82954+ return 0;
82955+}
82956+
82957+static int
82958+copy_user_acl(struct gr_arg *arg)
82959+{
82960+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
82961+ struct acl_subject_label *subj_list;
82962+ struct sprole_pw *sptmp;
82963+ struct gr_hash_struct *ghash;
82964+ uid_t *domainlist;
82965+ unsigned int r_num;
82966+ int err = 0;
82967+ __u16 i;
82968+ __u32 num_subjs;
82969+
82970+ /* we need a default and kernel role */
82971+ if (arg->role_db.num_roles < 2)
82972+ return -EINVAL;
82973+
82974+ /* copy special role authentication info from userspace */
82975+
82976+ polstate->num_sprole_pws = arg->num_sprole_pws;
82977+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
82978+
82979+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
82980+ return -ENOMEM;
82981+
82982+ for (i = 0; i < polstate->num_sprole_pws; i++) {
82983+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
82984+ if (!sptmp)
82985+ return -ENOMEM;
82986+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
82987+ return -EFAULT;
82988+
82989+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
82990+ if (err)
82991+ return err;
82992+
82993+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
82994+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
82995+#endif
82996+
82997+ polstate->acl_special_roles[i] = sptmp;
82998+ }
82999+
83000+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
83001+
83002+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
83003+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
83004+
83005+ if (!r_tmp)
83006+ return -ENOMEM;
83007+
83008+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
83009+ return -EFAULT;
83010+
83011+ if (copy_acl_role_label(r_tmp, r_utmp2))
83012+ return -EFAULT;
83013+
83014+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
83015+ if (err)
83016+ return err;
83017+
83018+ if (!strcmp(r_tmp->rolename, "default")
83019+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
83020+ polstate->default_role = r_tmp;
83021+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
83022+ polstate->kernel_role = r_tmp;
83023+ }
83024+
83025+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
83026+ return -ENOMEM;
83027+
83028+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
83029+ return -EFAULT;
83030+
83031+ r_tmp->hash = ghash;
83032+
83033+ num_subjs = count_user_subjs(r_tmp->hash->first);
83034+
83035+ r_tmp->subj_hash_size = num_subjs;
83036+ r_tmp->subj_hash =
83037+ (struct acl_subject_label **)
83038+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
83039+
83040+ if (!r_tmp->subj_hash)
83041+ return -ENOMEM;
83042+
83043+ err = copy_user_allowedips(r_tmp);
83044+ if (err)
83045+ return err;
83046+
83047+ /* copy domain info */
83048+ if (r_tmp->domain_children != NULL) {
83049+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
83050+ if (domainlist == NULL)
83051+ return -ENOMEM;
83052+
83053+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
83054+ return -EFAULT;
83055+
83056+ r_tmp->domain_children = domainlist;
83057+ }
83058+
83059+ err = copy_user_transitions(r_tmp);
83060+ if (err)
83061+ return err;
83062+
83063+ memset(r_tmp->subj_hash, 0,
83064+ r_tmp->subj_hash_size *
83065+ sizeof (struct acl_subject_label *));
83066+
83067+ /* acquire the list of subjects, then NULL out
83068+ the list prior to parsing the subjects for this role,
83069+ as during this parsing the list is replaced with a list
83070+ of *nested* subjects for the role
83071+ */
83072+ subj_list = r_tmp->hash->first;
83073+
83074+ /* set nested subject list to null */
83075+ r_tmp->hash->first = NULL;
83076+
83077+ err = copy_user_subjs(subj_list, r_tmp);
83078+
83079+ if (err)
83080+ return err;
83081+
83082+ insert_acl_role_label(r_tmp);
83083+ }
83084+
83085+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
83086+ return -EINVAL;
83087+
83088+ return err;
83089+}
83090+
83091+static int gracl_reload_apply_policies(void *reload)
83092+{
83093+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
83094+ struct task_struct *task, *task2;
83095+ struct acl_role_label *role, *rtmp;
83096+ struct acl_subject_label *subj;
83097+ const struct cred *cred;
83098+ int role_applied;
83099+ int ret = 0;
83100+
83101+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
83102+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
83103+
83104+ /* first make sure we'll be able to apply the new policy cleanly */
83105+ do_each_thread(task2, task) {
83106+ if (task->exec_file == NULL)
83107+ continue;
83108+ role_applied = 0;
83109+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
83110+ /* preserve special roles */
83111+ FOR_EACH_ROLE_START(role)
83112+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
83113+ rtmp = task->role;
83114+ task->role = role;
83115+ role_applied = 1;
83116+ break;
83117+ }
83118+ FOR_EACH_ROLE_END(role)
83119+ }
83120+ if (!role_applied) {
83121+ cred = __task_cred(task);
83122+ rtmp = task->role;
83123+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
83124+ }
83125+ /* this handles non-nested inherited subjects, nested subjects will still
83126+ be dropped currently */
83127+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
83128+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
83129+ /* change the role back so that we've made no modifications to the policy */
83130+ task->role = rtmp;
83131+
83132+ if (subj == NULL || task->tmpacl == NULL) {
83133+ ret = -EINVAL;
83134+ goto out;
83135+ }
83136+ } while_each_thread(task2, task);
83137+
83138+ /* now actually apply the policy */
83139+
83140+ do_each_thread(task2, task) {
83141+ if (task->exec_file) {
83142+ role_applied = 0;
83143+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
83144+ /* preserve special roles */
83145+ FOR_EACH_ROLE_START(role)
83146+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
83147+ task->role = role;
83148+ role_applied = 1;
83149+ break;
83150+ }
83151+ FOR_EACH_ROLE_END(role)
83152+ }
83153+ if (!role_applied) {
83154+ cred = __task_cred(task);
83155+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
83156+ }
83157+ /* this handles non-nested inherited subjects, nested subjects will still
83158+ be dropped currently */
83159+ if (!reload_state->oldmode && task->inherited)
83160+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
83161+ else {
83162+ /* looked up and tagged to the task previously */
83163+ subj = task->tmpacl;
83164+ }
83165+ /* subj will be non-null */
83166+ __gr_apply_subject_to_task(polstate, task, subj);
83167+ if (reload_state->oldmode) {
83168+ task->acl_role_id = 0;
83169+ task->acl_sp_role = 0;
83170+ task->inherited = 0;
83171+ }
83172+ } else {
83173+ // it's a kernel process
83174+ task->role = polstate->kernel_role;
83175+ task->acl = polstate->kernel_role->root_label;
83176+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
83177+ task->acl->mode &= ~GR_PROCFIND;
83178+#endif
83179+ }
83180+ } while_each_thread(task2, task);
83181+
83182+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
83183+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
83184+
83185+out:
83186+
83187+ return ret;
83188+}
83189+
83190+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
83191+{
83192+ struct gr_reload_state new_reload_state = { };
83193+ int err;
83194+
83195+ new_reload_state.oldpolicy_ptr = polstate;
83196+ new_reload_state.oldalloc_ptr = current_alloc_state;
83197+ new_reload_state.oldmode = oldmode;
83198+
83199+ current_alloc_state = &new_reload_state.newalloc;
83200+ polstate = &new_reload_state.newpolicy;
83201+
83202+ /* everything relevant is now saved off, copy in the new policy */
83203+ if (init_variables(args, true)) {
83204+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
83205+ err = -ENOMEM;
83206+ goto error;
83207+ }
83208+
83209+ err = copy_user_acl(args);
83210+ free_init_variables();
83211+ if (err)
83212+ goto error;
83213+ /* the new policy is copied in, with the old policy available via saved_state
83214+ first go through applying roles, making sure to preserve special roles
83215+ then apply new subjects, making sure to preserve inherited and nested subjects,
83216+ though currently only inherited subjects will be preserved
83217+ */
83218+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
83219+ if (err)
83220+ goto error;
83221+
83222+ /* we've now applied the new policy, so restore the old policy state to free it */
83223+ polstate = &new_reload_state.oldpolicy;
83224+ current_alloc_state = &new_reload_state.oldalloc;
83225+ free_variables(true);
83226+
83227+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
83228+ to running_polstate/current_alloc_state inside stop_machine
83229+ */
83230+ err = 0;
83231+ goto out;
83232+error:
83233+ /* on error of loading the new policy, we'll just keep the previous
83234+ policy set around
83235+ */
83236+ free_variables(true);
83237+
83238+ /* doesn't affect runtime, but maintains consistent state */
83239+out:
83240+ polstate = new_reload_state.oldpolicy_ptr;
83241+ current_alloc_state = new_reload_state.oldalloc_ptr;
83242+
83243+ return err;
83244+}
83245+
83246+static int
83247+gracl_init(struct gr_arg *args)
83248+{
83249+ int error = 0;
83250+
83251+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
83252+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
83253+
83254+ if (init_variables(args, false)) {
83255+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
83256+ error = -ENOMEM;
83257+ goto out;
83258+ }
83259+
83260+ error = copy_user_acl(args);
83261+ free_init_variables();
83262+ if (error)
83263+ goto out;
83264+
83265+ error = gr_set_acls(0);
83266+ if (error)
83267+ goto out;
83268+
83269+ gr_enable_rbac_system();
83270+
83271+ return 0;
83272+
83273+out:
83274+ free_variables(false);
83275+ return error;
83276+}
83277+
83278+static int
83279+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
83280+ unsigned char **sum)
83281+{
83282+ struct acl_role_label *r;
83283+ struct role_allowed_ip *ipp;
83284+ struct role_transition *trans;
83285+ unsigned int i;
83286+ int found = 0;
83287+ u32 curr_ip = current->signal->curr_ip;
83288+
83289+ current->signal->saved_ip = curr_ip;
83290+
83291+ /* check transition table */
83292+
83293+ for (trans = current->role->transitions; trans; trans = trans->next) {
83294+ if (!strcmp(rolename, trans->rolename)) {
83295+ found = 1;
83296+ break;
83297+ }
83298+ }
83299+
83300+ if (!found)
83301+ return 0;
83302+
83303+ /* handle special roles that do not require authentication
83304+ and check ip */
83305+
83306+ FOR_EACH_ROLE_START(r)
83307+ if (!strcmp(rolename, r->rolename) &&
83308+ (r->roletype & GR_ROLE_SPECIAL)) {
83309+ found = 0;
83310+ if (r->allowed_ips != NULL) {
83311+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
83312+ if ((ntohl(curr_ip) & ipp->netmask) ==
83313+ (ntohl(ipp->addr) & ipp->netmask))
83314+ found = 1;
83315+ }
83316+ } else
83317+ found = 2;
83318+ if (!found)
83319+ return 0;
83320+
83321+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
83322+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
83323+ *salt = NULL;
83324+ *sum = NULL;
83325+ return 1;
83326+ }
83327+ }
83328+ FOR_EACH_ROLE_END(r)
83329+
83330+ for (i = 0; i < polstate->num_sprole_pws; i++) {
83331+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
83332+ *salt = polstate->acl_special_roles[i]->salt;
83333+ *sum = polstate->acl_special_roles[i]->sum;
83334+ return 1;
83335+ }
83336+ }
83337+
83338+ return 0;
83339+}
83340+
83341+int gr_check_secure_terminal(struct task_struct *task)
83342+{
83343+ struct task_struct *p, *p2, *p3;
83344+ struct files_struct *files;
83345+ struct fdtable *fdt;
83346+ struct file *our_file = NULL, *file;
83347+ int i;
83348+
83349+ if (task->signal->tty == NULL)
83350+ return 1;
83351+
83352+ files = get_files_struct(task);
83353+ if (files != NULL) {
83354+ rcu_read_lock();
83355+ fdt = files_fdtable(files);
83356+ for (i=0; i < fdt->max_fds; i++) {
83357+ file = fcheck_files(files, i);
83358+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
83359+ get_file(file);
83360+ our_file = file;
83361+ }
83362+ }
83363+ rcu_read_unlock();
83364+ put_files_struct(files);
83365+ }
83366+
83367+ if (our_file == NULL)
83368+ return 1;
83369+
83370+ read_lock(&tasklist_lock);
83371+ do_each_thread(p2, p) {
83372+ files = get_files_struct(p);
83373+ if (files == NULL ||
83374+ (p->signal && p->signal->tty == task->signal->tty)) {
83375+ if (files != NULL)
83376+ put_files_struct(files);
83377+ continue;
83378+ }
83379+ rcu_read_lock();
83380+ fdt = files_fdtable(files);
83381+ for (i=0; i < fdt->max_fds; i++) {
83382+ file = fcheck_files(files, i);
83383+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
83384+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
83385+ p3 = task;
83386+ while (task_pid_nr(p3) > 0) {
83387+ if (p3 == p)
83388+ break;
83389+ p3 = p3->real_parent;
83390+ }
83391+ if (p3 == p)
83392+ break;
83393+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
83394+ gr_handle_alertkill(p);
83395+ rcu_read_unlock();
83396+ put_files_struct(files);
83397+ read_unlock(&tasklist_lock);
83398+ fput(our_file);
83399+ return 0;
83400+ }
83401+ }
83402+ rcu_read_unlock();
83403+ put_files_struct(files);
83404+ } while_each_thread(p2, p);
83405+ read_unlock(&tasklist_lock);
83406+
83407+ fput(our_file);
83408+ return 1;
83409+}
83410+
83411+ssize_t
83412+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
83413+{
83414+ struct gr_arg_wrapper uwrap;
83415+ unsigned char *sprole_salt = NULL;
83416+ unsigned char *sprole_sum = NULL;
83417+ int error = 0;
83418+ int error2 = 0;
83419+ size_t req_count = 0;
83420+ unsigned char oldmode = 0;
83421+
83422+ mutex_lock(&gr_dev_mutex);
83423+
83424+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
83425+ error = -EPERM;
83426+ goto out;
83427+ }
83428+
83429+#ifdef CONFIG_COMPAT
83430+ pax_open_kernel();
83431+ if (is_compat_task()) {
83432+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
83433+ copy_gr_arg = &copy_gr_arg_compat;
83434+ copy_acl_object_label = &copy_acl_object_label_compat;
83435+ copy_acl_subject_label = &copy_acl_subject_label_compat;
83436+ copy_acl_role_label = &copy_acl_role_label_compat;
83437+ copy_acl_ip_label = &copy_acl_ip_label_compat;
83438+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
83439+ copy_role_transition = &copy_role_transition_compat;
83440+ copy_sprole_pw = &copy_sprole_pw_compat;
83441+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
83442+ copy_pointer_from_array = &copy_pointer_from_array_compat;
83443+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
83444+ } else {
83445+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
83446+ copy_gr_arg = &copy_gr_arg_normal;
83447+ copy_acl_object_label = &copy_acl_object_label_normal;
83448+ copy_acl_subject_label = &copy_acl_subject_label_normal;
83449+ copy_acl_role_label = &copy_acl_role_label_normal;
83450+ copy_acl_ip_label = &copy_acl_ip_label_normal;
83451+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
83452+ copy_role_transition = &copy_role_transition_normal;
83453+ copy_sprole_pw = &copy_sprole_pw_normal;
83454+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
83455+ copy_pointer_from_array = &copy_pointer_from_array_normal;
83456+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
83457+ }
83458+ pax_close_kernel();
83459+#endif
83460+
83461+ req_count = get_gr_arg_wrapper_size();
83462+
83463+ if (count != req_count) {
83464+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
83465+ error = -EINVAL;
83466+ goto out;
83467+ }
83468+
83469+
83470+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
83471+ gr_auth_expires = 0;
83472+ gr_auth_attempts = 0;
83473+ }
83474+
83475+ error = copy_gr_arg_wrapper(buf, &uwrap);
83476+ if (error)
83477+ goto out;
83478+
83479+ error = copy_gr_arg(uwrap.arg, gr_usermode);
83480+ if (error)
83481+ goto out;
83482+
83483+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
83484+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
83485+ time_after(gr_auth_expires, get_seconds())) {
83486+ error = -EBUSY;
83487+ goto out;
83488+ }
83489+
83490+ /* if non-root trying to do anything other than use a special role,
83491+ do not attempt authentication, do not count towards authentication
83492+ locking
83493+ */
83494+
83495+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
83496+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
83497+ gr_is_global_nonroot(current_uid())) {
83498+ error = -EPERM;
83499+ goto out;
83500+ }
83501+
83502+ /* ensure pw and special role name are null terminated */
83503+
83504+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
83505+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
83506+
83507+ /* Okay.
83508+ * We have our enough of the argument structure..(we have yet
83509+ * to copy_from_user the tables themselves) . Copy the tables
83510+ * only if we need them, i.e. for loading operations. */
83511+
83512+ switch (gr_usermode->mode) {
83513+ case GR_STATUS:
83514+ if (gr_acl_is_enabled()) {
83515+ error = 1;
83516+ if (!gr_check_secure_terminal(current))
83517+ error = 3;
83518+ } else
83519+ error = 2;
83520+ goto out;
83521+ case GR_SHUTDOWN:
83522+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
83523+ stop_machine(gr_rbac_disable, NULL, NULL);
83524+ free_variables(false);
83525+ memset(gr_usermode, 0, sizeof(struct gr_arg));
83526+ memset(gr_system_salt, 0, GR_SALT_LEN);
83527+ memset(gr_system_sum, 0, GR_SHA_LEN);
83528+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
83529+ } else if (gr_acl_is_enabled()) {
83530+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
83531+ error = -EPERM;
83532+ } else {
83533+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
83534+ error = -EAGAIN;
83535+ }
83536+ break;
83537+ case GR_ENABLE:
83538+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
83539+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
83540+ else {
83541+ if (gr_acl_is_enabled())
83542+ error = -EAGAIN;
83543+ else
83544+ error = error2;
83545+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
83546+ }
83547+ break;
83548+ case GR_OLDRELOAD:
83549+ oldmode = 1;
83550+ case GR_RELOAD:
83551+ if (!gr_acl_is_enabled()) {
83552+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
83553+ error = -EAGAIN;
83554+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
83555+ error2 = gracl_reload(gr_usermode, oldmode);
83556+ if (!error2)
83557+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
83558+ else {
83559+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
83560+ error = error2;
83561+ }
83562+ } else {
83563+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
83564+ error = -EPERM;
83565+ }
83566+ break;
83567+ case GR_SEGVMOD:
83568+ if (unlikely(!gr_acl_is_enabled())) {
83569+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
83570+ error = -EAGAIN;
83571+ break;
83572+ }
83573+
83574+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
83575+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
83576+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
83577+ struct acl_subject_label *segvacl;
83578+ segvacl =
83579+ lookup_acl_subj_label(gr_usermode->segv_inode,
83580+ gr_usermode->segv_device,
83581+ current->role);
83582+ if (segvacl) {
83583+ segvacl->crashes = 0;
83584+ segvacl->expires = 0;
83585+ }
83586+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
83587+ gr_remove_uid(gr_usermode->segv_uid);
83588+ }
83589+ } else {
83590+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
83591+ error = -EPERM;
83592+ }
83593+ break;
83594+ case GR_SPROLE:
83595+ case GR_SPROLEPAM:
83596+ if (unlikely(!gr_acl_is_enabled())) {
83597+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
83598+ error = -EAGAIN;
83599+ break;
83600+ }
83601+
83602+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
83603+ current->role->expires = 0;
83604+ current->role->auth_attempts = 0;
83605+ }
83606+
83607+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
83608+ time_after(current->role->expires, get_seconds())) {
83609+ error = -EBUSY;
83610+ goto out;
83611+ }
83612+
83613+ if (lookup_special_role_auth
83614+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
83615+ && ((!sprole_salt && !sprole_sum)
83616+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
83617+ char *p = "";
83618+ assign_special_role(gr_usermode->sp_role);
83619+ read_lock(&tasklist_lock);
83620+ if (current->real_parent)
83621+ p = current->real_parent->role->rolename;
83622+ read_unlock(&tasklist_lock);
83623+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
83624+ p, acl_sp_role_value);
83625+ } else {
83626+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
83627+ error = -EPERM;
83628+ if(!(current->role->auth_attempts++))
83629+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
83630+
83631+ goto out;
83632+ }
83633+ break;
83634+ case GR_UNSPROLE:
83635+ if (unlikely(!gr_acl_is_enabled())) {
83636+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
83637+ error = -EAGAIN;
83638+ break;
83639+ }
83640+
83641+ if (current->role->roletype & GR_ROLE_SPECIAL) {
83642+ char *p = "";
83643+ int i = 0;
83644+
83645+ read_lock(&tasklist_lock);
83646+ if (current->real_parent) {
83647+ p = current->real_parent->role->rolename;
83648+ i = current->real_parent->acl_role_id;
83649+ }
83650+ read_unlock(&tasklist_lock);
83651+
83652+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
83653+ gr_set_acls(1);
83654+ } else {
83655+ error = -EPERM;
83656+ goto out;
83657+ }
83658+ break;
83659+ default:
83660+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
83661+ error = -EINVAL;
83662+ break;
83663+ }
83664+
83665+ if (error != -EPERM)
83666+ goto out;
83667+
83668+ if(!(gr_auth_attempts++))
83669+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
83670+
83671+ out:
83672+ mutex_unlock(&gr_dev_mutex);
83673+
83674+ if (!error)
83675+ error = req_count;
83676+
83677+ return error;
83678+}
83679+
83680+int
83681+gr_set_acls(const int type)
83682+{
83683+ struct task_struct *task, *task2;
83684+ struct acl_role_label *role = current->role;
83685+ struct acl_subject_label *subj;
83686+ __u16 acl_role_id = current->acl_role_id;
83687+ const struct cred *cred;
83688+ int ret;
83689+
83690+ rcu_read_lock();
83691+ read_lock(&tasklist_lock);
83692+ read_lock(&grsec_exec_file_lock);
83693+ do_each_thread(task2, task) {
83694+ /* check to see if we're called from the exit handler,
83695+ if so, only replace ACLs that have inherited the admin
83696+ ACL */
83697+
83698+ if (type && (task->role != role ||
83699+ task->acl_role_id != acl_role_id))
83700+ continue;
83701+
83702+ task->acl_role_id = 0;
83703+ task->acl_sp_role = 0;
83704+ task->inherited = 0;
83705+
83706+ if (task->exec_file) {
83707+ cred = __task_cred(task);
83708+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
83709+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
83710+ if (subj == NULL) {
83711+ ret = -EINVAL;
83712+ read_unlock(&grsec_exec_file_lock);
83713+ read_unlock(&tasklist_lock);
83714+ rcu_read_unlock();
83715+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
83716+ return ret;
83717+ }
83718+ __gr_apply_subject_to_task(polstate, task, subj);
83719+ } else {
83720+ // it's a kernel process
83721+ task->role = polstate->kernel_role;
83722+ task->acl = polstate->kernel_role->root_label;
83723+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
83724+ task->acl->mode &= ~GR_PROCFIND;
83725+#endif
83726+ }
83727+ } while_each_thread(task2, task);
83728+ read_unlock(&grsec_exec_file_lock);
83729+ read_unlock(&tasklist_lock);
83730+ rcu_read_unlock();
83731+
83732+ return 0;
83733+}
83734diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
83735new file mode 100644
83736index 0000000..39645c9
83737--- /dev/null
83738+++ b/grsecurity/gracl_res.c
83739@@ -0,0 +1,68 @@
83740+#include <linux/kernel.h>
83741+#include <linux/sched.h>
83742+#include <linux/gracl.h>
83743+#include <linux/grinternal.h>
83744+
83745+static const char *restab_log[] = {
83746+ [RLIMIT_CPU] = "RLIMIT_CPU",
83747+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
83748+ [RLIMIT_DATA] = "RLIMIT_DATA",
83749+ [RLIMIT_STACK] = "RLIMIT_STACK",
83750+ [RLIMIT_CORE] = "RLIMIT_CORE",
83751+ [RLIMIT_RSS] = "RLIMIT_RSS",
83752+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
83753+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
83754+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
83755+ [RLIMIT_AS] = "RLIMIT_AS",
83756+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
83757+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
83758+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
83759+ [RLIMIT_NICE] = "RLIMIT_NICE",
83760+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
83761+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
83762+ [GR_CRASH_RES] = "RLIMIT_CRASH"
83763+};
83764+
83765+void
83766+gr_log_resource(const struct task_struct *task,
83767+ const int res, const unsigned long wanted, const int gt)
83768+{
83769+ const struct cred *cred;
83770+ unsigned long rlim;
83771+
83772+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
83773+ return;
83774+
83775+ // not yet supported resource
83776+ if (unlikely(!restab_log[res]))
83777+ return;
83778+
83779+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
83780+ rlim = task_rlimit_max(task, res);
83781+ else
83782+ rlim = task_rlimit(task, res);
83783+
83784+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
83785+ return;
83786+
83787+ rcu_read_lock();
83788+ cred = __task_cred(task);
83789+
83790+ if (res == RLIMIT_NPROC &&
83791+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
83792+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
83793+ goto out_rcu_unlock;
83794+ else if (res == RLIMIT_MEMLOCK &&
83795+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
83796+ goto out_rcu_unlock;
83797+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
83798+ goto out_rcu_unlock;
83799+ rcu_read_unlock();
83800+
83801+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
83802+
83803+ return;
83804+out_rcu_unlock:
83805+ rcu_read_unlock();
83806+ return;
83807+}
83808diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
83809new file mode 100644
83810index 0000000..35d9e65
83811--- /dev/null
83812+++ b/grsecurity/gracl_segv.c
83813@@ -0,0 +1,324 @@
83814+#include <linux/kernel.h>
83815+#include <linux/mm.h>
83816+#include <asm/uaccess.h>
83817+#include <asm/errno.h>
83818+#include <asm/mman.h>
83819+#include <net/sock.h>
83820+#include <linux/file.h>
83821+#include <linux/fs.h>
83822+#include <linux/net.h>
83823+#include <linux/in.h>
83824+#include <linux/slab.h>
83825+#include <linux/types.h>
83826+#include <linux/sched.h>
83827+#include <linux/timer.h>
83828+#include <linux/gracl.h>
83829+#include <linux/grsecurity.h>
83830+#include <linux/grinternal.h>
83831+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
83832+#include <linux/magic.h>
83833+#include <linux/pagemap.h>
83834+#include "../fs/btrfs/async-thread.h"
83835+#include "../fs/btrfs/ctree.h"
83836+#include "../fs/btrfs/btrfs_inode.h"
83837+#endif
83838+
83839+static struct crash_uid *uid_set;
83840+static unsigned short uid_used;
83841+static DEFINE_SPINLOCK(gr_uid_lock);
83842+extern rwlock_t gr_inode_lock;
83843+extern struct acl_subject_label *
83844+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
83845+ struct acl_role_label *role);
83846+
83847+static inline dev_t __get_dev(const struct dentry *dentry)
83848+{
83849+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
83850+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
83851+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
83852+ else
83853+#endif
83854+ return dentry->d_sb->s_dev;
83855+}
83856+
83857+static inline u64 __get_ino(const struct dentry *dentry)
83858+{
83859+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
83860+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
83861+ return btrfs_ino(dentry->d_inode);
83862+ else
83863+#endif
83864+ return dentry->d_inode->i_ino;
83865+}
83866+
83867+int
83868+gr_init_uidset(void)
83869+{
83870+ uid_set =
83871+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
83872+ uid_used = 0;
83873+
83874+ return uid_set ? 1 : 0;
83875+}
83876+
83877+void
83878+gr_free_uidset(void)
83879+{
83880+ if (uid_set) {
83881+ struct crash_uid *tmpset;
83882+ spin_lock(&gr_uid_lock);
83883+ tmpset = uid_set;
83884+ uid_set = NULL;
83885+ uid_used = 0;
83886+ spin_unlock(&gr_uid_lock);
83887+ if (tmpset)
83888+ kfree(tmpset);
83889+ }
83890+
83891+ return;
83892+}
83893+
83894+int
83895+gr_find_uid(const uid_t uid)
83896+{
83897+ struct crash_uid *tmp = uid_set;
83898+ uid_t buid;
83899+ int low = 0, high = uid_used - 1, mid;
83900+
83901+ while (high >= low) {
83902+ mid = (low + high) >> 1;
83903+ buid = tmp[mid].uid;
83904+ if (buid == uid)
83905+ return mid;
83906+ if (buid > uid)
83907+ high = mid - 1;
83908+ if (buid < uid)
83909+ low = mid + 1;
83910+ }
83911+
83912+ return -1;
83913+}
83914+
83915+static void
83916+gr_insertsort(void)
83917+{
83918+ unsigned short i, j;
83919+ struct crash_uid index;
83920+
83921+ for (i = 1; i < uid_used; i++) {
83922+ index = uid_set[i];
83923+ j = i;
83924+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
83925+ uid_set[j] = uid_set[j - 1];
83926+ j--;
83927+ }
83928+ uid_set[j] = index;
83929+ }
83930+
83931+ return;
83932+}
83933+
83934+static void
83935+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
83936+{
83937+ int loc;
83938+ uid_t uid = GR_GLOBAL_UID(kuid);
83939+
83940+ if (uid_used == GR_UIDTABLE_MAX)
83941+ return;
83942+
83943+ loc = gr_find_uid(uid);
83944+
83945+ if (loc >= 0) {
83946+ uid_set[loc].expires = expires;
83947+ return;
83948+ }
83949+
83950+ uid_set[uid_used].uid = uid;
83951+ uid_set[uid_used].expires = expires;
83952+ uid_used++;
83953+
83954+ gr_insertsort();
83955+
83956+ return;
83957+}
83958+
83959+void
83960+gr_remove_uid(const unsigned short loc)
83961+{
83962+ unsigned short i;
83963+
83964+ for (i = loc + 1; i < uid_used; i++)
83965+ uid_set[i - 1] = uid_set[i];
83966+
83967+ uid_used--;
83968+
83969+ return;
83970+}
83971+
83972+int
83973+gr_check_crash_uid(const kuid_t kuid)
83974+{
83975+ int loc;
83976+ int ret = 0;
83977+ uid_t uid;
83978+
83979+ if (unlikely(!gr_acl_is_enabled()))
83980+ return 0;
83981+
83982+ uid = GR_GLOBAL_UID(kuid);
83983+
83984+ spin_lock(&gr_uid_lock);
83985+ loc = gr_find_uid(uid);
83986+
83987+ if (loc < 0)
83988+ goto out_unlock;
83989+
83990+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
83991+ gr_remove_uid(loc);
83992+ else
83993+ ret = 1;
83994+
83995+out_unlock:
83996+ spin_unlock(&gr_uid_lock);
83997+ return ret;
83998+}
83999+
84000+static int
84001+proc_is_setxid(const struct cred *cred)
84002+{
84003+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
84004+ !uid_eq(cred->uid, cred->fsuid))
84005+ return 1;
84006+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
84007+ !gid_eq(cred->gid, cred->fsgid))
84008+ return 1;
84009+
84010+ return 0;
84011+}
84012+
84013+extern int gr_fake_force_sig(int sig, struct task_struct *t);
84014+
84015+void
84016+gr_handle_crash(struct task_struct *task, const int sig)
84017+{
84018+ struct acl_subject_label *curr;
84019+ struct task_struct *tsk, *tsk2;
84020+ const struct cred *cred;
84021+ const struct cred *cred2;
84022+
84023+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
84024+ return;
84025+
84026+ if (unlikely(!gr_acl_is_enabled()))
84027+ return;
84028+
84029+ curr = task->acl;
84030+
84031+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
84032+ return;
84033+
84034+ if (time_before_eq(curr->expires, get_seconds())) {
84035+ curr->expires = 0;
84036+ curr->crashes = 0;
84037+ }
84038+
84039+ curr->crashes++;
84040+
84041+ if (!curr->expires)
84042+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
84043+
84044+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
84045+ time_after(curr->expires, get_seconds())) {
84046+ rcu_read_lock();
84047+ cred = __task_cred(task);
84048+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
84049+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
84050+ spin_lock(&gr_uid_lock);
84051+ gr_insert_uid(cred->uid, curr->expires);
84052+ spin_unlock(&gr_uid_lock);
84053+ curr->expires = 0;
84054+ curr->crashes = 0;
84055+ read_lock(&tasklist_lock);
84056+ do_each_thread(tsk2, tsk) {
84057+ cred2 = __task_cred(tsk);
84058+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
84059+ gr_fake_force_sig(SIGKILL, tsk);
84060+ } while_each_thread(tsk2, tsk);
84061+ read_unlock(&tasklist_lock);
84062+ } else {
84063+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
84064+ read_lock(&tasklist_lock);
84065+ read_lock(&grsec_exec_file_lock);
84066+ do_each_thread(tsk2, tsk) {
84067+ if (likely(tsk != task)) {
84068+ // if this thread has the same subject as the one that triggered
84069+ // RES_CRASH and it's the same binary, kill it
84070+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
84071+ gr_fake_force_sig(SIGKILL, tsk);
84072+ }
84073+ } while_each_thread(tsk2, tsk);
84074+ read_unlock(&grsec_exec_file_lock);
84075+ read_unlock(&tasklist_lock);
84076+ }
84077+ rcu_read_unlock();
84078+ }
84079+
84080+ return;
84081+}
84082+
84083+int
84084+gr_check_crash_exec(const struct file *filp)
84085+{
84086+ struct acl_subject_label *curr;
84087+ struct dentry *dentry;
84088+
84089+ if (unlikely(!gr_acl_is_enabled()))
84090+ return 0;
84091+
84092+ read_lock(&gr_inode_lock);
84093+ dentry = filp->f_path.dentry;
84094+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
84095+ current->role);
84096+ read_unlock(&gr_inode_lock);
84097+
84098+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
84099+ (!curr->crashes && !curr->expires))
84100+ return 0;
84101+
84102+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
84103+ time_after(curr->expires, get_seconds()))
84104+ return 1;
84105+ else if (time_before_eq(curr->expires, get_seconds())) {
84106+ curr->crashes = 0;
84107+ curr->expires = 0;
84108+ }
84109+
84110+ return 0;
84111+}
84112+
84113+void
84114+gr_handle_alertkill(struct task_struct *task)
84115+{
84116+ struct acl_subject_label *curracl;
84117+ __u32 curr_ip;
84118+ struct task_struct *p, *p2;
84119+
84120+ if (unlikely(!gr_acl_is_enabled()))
84121+ return;
84122+
84123+ curracl = task->acl;
84124+ curr_ip = task->signal->curr_ip;
84125+
84126+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
84127+ read_lock(&tasklist_lock);
84128+ do_each_thread(p2, p) {
84129+ if (p->signal->curr_ip == curr_ip)
84130+ gr_fake_force_sig(SIGKILL, p);
84131+ } while_each_thread(p2, p);
84132+ read_unlock(&tasklist_lock);
84133+ } else if (curracl->mode & GR_KILLPROC)
84134+ gr_fake_force_sig(SIGKILL, task);
84135+
84136+ return;
84137+}
84138diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
84139new file mode 100644
84140index 0000000..6b0c9cc
84141--- /dev/null
84142+++ b/grsecurity/gracl_shm.c
84143@@ -0,0 +1,40 @@
84144+#include <linux/kernel.h>
84145+#include <linux/mm.h>
84146+#include <linux/sched.h>
84147+#include <linux/file.h>
84148+#include <linux/ipc.h>
84149+#include <linux/gracl.h>
84150+#include <linux/grsecurity.h>
84151+#include <linux/grinternal.h>
84152+
84153+int
84154+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84155+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
84156+{
84157+ struct task_struct *task;
84158+
84159+ if (!gr_acl_is_enabled())
84160+ return 1;
84161+
84162+ rcu_read_lock();
84163+ read_lock(&tasklist_lock);
84164+
84165+ task = find_task_by_vpid(shm_cprid);
84166+
84167+ if (unlikely(!task))
84168+ task = find_task_by_vpid(shm_lapid);
84169+
84170+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
84171+ (task_pid_nr(task) == shm_lapid)) &&
84172+ (task->acl->mode & GR_PROTSHM) &&
84173+ (task->acl != current->acl))) {
84174+ read_unlock(&tasklist_lock);
84175+ rcu_read_unlock();
84176+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
84177+ return 0;
84178+ }
84179+ read_unlock(&tasklist_lock);
84180+ rcu_read_unlock();
84181+
84182+ return 1;
84183+}
84184diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
84185new file mode 100644
84186index 0000000..bc0be01
84187--- /dev/null
84188+++ b/grsecurity/grsec_chdir.c
84189@@ -0,0 +1,19 @@
84190+#include <linux/kernel.h>
84191+#include <linux/sched.h>
84192+#include <linux/fs.h>
84193+#include <linux/file.h>
84194+#include <linux/grsecurity.h>
84195+#include <linux/grinternal.h>
84196+
84197+void
84198+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
84199+{
84200+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
84201+ if ((grsec_enable_chdir && grsec_enable_group &&
84202+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
84203+ !grsec_enable_group)) {
84204+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
84205+ }
84206+#endif
84207+ return;
84208+}
84209diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
84210new file mode 100644
84211index 0000000..114ea4f
84212--- /dev/null
84213+++ b/grsecurity/grsec_chroot.c
84214@@ -0,0 +1,467 @@
84215+#include <linux/kernel.h>
84216+#include <linux/module.h>
84217+#include <linux/sched.h>
84218+#include <linux/file.h>
84219+#include <linux/fs.h>
84220+#include <linux/mount.h>
84221+#include <linux/types.h>
84222+#include "../fs/mount.h"
84223+#include <linux/grsecurity.h>
84224+#include <linux/grinternal.h>
84225+
84226+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84227+int gr_init_ran;
84228+#endif
84229+
84230+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
84231+{
84232+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84233+ struct dentry *tmpd = dentry;
84234+
84235+ read_seqlock_excl(&mount_lock);
84236+ write_seqlock(&rename_lock);
84237+
84238+ while (tmpd != mnt->mnt_root) {
84239+ atomic_inc(&tmpd->chroot_refcnt);
84240+ tmpd = tmpd->d_parent;
84241+ }
84242+ atomic_inc(&tmpd->chroot_refcnt);
84243+
84244+ write_sequnlock(&rename_lock);
84245+ read_sequnlock_excl(&mount_lock);
84246+#endif
84247+}
84248+
84249+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
84250+{
84251+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84252+ struct dentry *tmpd = dentry;
84253+
84254+ read_seqlock_excl(&mount_lock);
84255+ write_seqlock(&rename_lock);
84256+
84257+ while (tmpd != mnt->mnt_root) {
84258+ atomic_dec(&tmpd->chroot_refcnt);
84259+ tmpd = tmpd->d_parent;
84260+ }
84261+ atomic_dec(&tmpd->chroot_refcnt);
84262+
84263+ write_sequnlock(&rename_lock);
84264+ read_sequnlock_excl(&mount_lock);
84265+#endif
84266+}
84267+
84268+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84269+static struct dentry *get_closest_chroot(struct dentry *dentry)
84270+{
84271+ write_seqlock(&rename_lock);
84272+ do {
84273+ if (atomic_read(&dentry->chroot_refcnt)) {
84274+ write_sequnlock(&rename_lock);
84275+ return dentry;
84276+ }
84277+ dentry = dentry->d_parent;
84278+ } while (!IS_ROOT(dentry));
84279+ write_sequnlock(&rename_lock);
84280+ return NULL;
84281+}
84282+#endif
84283+
84284+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
84285+ struct dentry *newdentry, struct vfsmount *newmnt)
84286+{
84287+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84288+ struct dentry *chroot;
84289+
84290+ if (unlikely(!grsec_enable_chroot_rename))
84291+ return 0;
84292+
84293+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
84294+ return 0;
84295+
84296+ chroot = get_closest_chroot(olddentry);
84297+
84298+ if (chroot == NULL)
84299+ return 0;
84300+
84301+ if (is_subdir(newdentry, chroot))
84302+ return 0;
84303+
84304+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
84305+
84306+ return 1;
84307+#else
84308+ return 0;
84309+#endif
84310+}
84311+
84312+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
84313+{
84314+#ifdef CONFIG_GRKERNSEC
84315+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
84316+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
84317+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84318+ && gr_init_ran
84319+#endif
84320+ )
84321+ task->gr_is_chrooted = 1;
84322+ else {
84323+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84324+ if (task_pid_nr(task) == 1 && !gr_init_ran)
84325+ gr_init_ran = 1;
84326+#endif
84327+ task->gr_is_chrooted = 0;
84328+ }
84329+
84330+ task->gr_chroot_dentry = path->dentry;
84331+#endif
84332+ return;
84333+}
84334+
84335+void gr_clear_chroot_entries(struct task_struct *task)
84336+{
84337+#ifdef CONFIG_GRKERNSEC
84338+ task->gr_is_chrooted = 0;
84339+ task->gr_chroot_dentry = NULL;
84340+#endif
84341+ return;
84342+}
84343+
84344+int
84345+gr_handle_chroot_unix(const pid_t pid)
84346+{
84347+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
84348+ struct task_struct *p;
84349+
84350+ if (unlikely(!grsec_enable_chroot_unix))
84351+ return 1;
84352+
84353+ if (likely(!proc_is_chrooted(current)))
84354+ return 1;
84355+
84356+ rcu_read_lock();
84357+ read_lock(&tasklist_lock);
84358+ p = find_task_by_vpid_unrestricted(pid);
84359+ if (unlikely(p && !have_same_root(current, p))) {
84360+ read_unlock(&tasklist_lock);
84361+ rcu_read_unlock();
84362+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
84363+ return 0;
84364+ }
84365+ read_unlock(&tasklist_lock);
84366+ rcu_read_unlock();
84367+#endif
84368+ return 1;
84369+}
84370+
84371+int
84372+gr_handle_chroot_nice(void)
84373+{
84374+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
84375+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
84376+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
84377+ return -EPERM;
84378+ }
84379+#endif
84380+ return 0;
84381+}
84382+
84383+int
84384+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
84385+{
84386+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
84387+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
84388+ && proc_is_chrooted(current)) {
84389+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
84390+ return -EACCES;
84391+ }
84392+#endif
84393+ return 0;
84394+}
84395+
84396+int
84397+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
84398+{
84399+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84400+ struct task_struct *p;
84401+ int ret = 0;
84402+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
84403+ return ret;
84404+
84405+ read_lock(&tasklist_lock);
84406+ do_each_pid_task(pid, type, p) {
84407+ if (!have_same_root(current, p)) {
84408+ ret = 1;
84409+ goto out;
84410+ }
84411+ } while_each_pid_task(pid, type, p);
84412+out:
84413+ read_unlock(&tasklist_lock);
84414+ return ret;
84415+#endif
84416+ return 0;
84417+}
84418+
84419+int
84420+gr_pid_is_chrooted(struct task_struct *p)
84421+{
84422+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84423+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
84424+ return 0;
84425+
84426+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
84427+ !have_same_root(current, p)) {
84428+ return 1;
84429+ }
84430+#endif
84431+ return 0;
84432+}
84433+
84434+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
84435+
84436+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
84437+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
84438+{
84439+ struct path path, currentroot;
84440+ int ret = 0;
84441+
84442+ path.dentry = (struct dentry *)u_dentry;
84443+ path.mnt = (struct vfsmount *)u_mnt;
84444+ get_fs_root(current->fs, &currentroot);
84445+ if (path_is_under(&path, &currentroot))
84446+ ret = 1;
84447+ path_put(&currentroot);
84448+
84449+ return ret;
84450+}
84451+#endif
84452+
84453+int
84454+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
84455+{
84456+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
84457+ if (!grsec_enable_chroot_fchdir)
84458+ return 1;
84459+
84460+ if (!proc_is_chrooted(current))
84461+ return 1;
84462+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
84463+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
84464+ return 0;
84465+ }
84466+#endif
84467+ return 1;
84468+}
84469+
84470+int
84471+gr_chroot_fhandle(void)
84472+{
84473+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
84474+ if (!grsec_enable_chroot_fchdir)
84475+ return 1;
84476+
84477+ if (!proc_is_chrooted(current))
84478+ return 1;
84479+ else {
84480+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
84481+ return 0;
84482+ }
84483+#endif
84484+ return 1;
84485+}
84486+
84487+int
84488+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84489+ const u64 shm_createtime)
84490+{
84491+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
84492+ struct task_struct *p;
84493+
84494+ if (unlikely(!grsec_enable_chroot_shmat))
84495+ return 1;
84496+
84497+ if (likely(!proc_is_chrooted(current)))
84498+ return 1;
84499+
84500+ rcu_read_lock();
84501+ read_lock(&tasklist_lock);
84502+
84503+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
84504+ if (time_before_eq64(p->start_time, shm_createtime)) {
84505+ if (have_same_root(current, p)) {
84506+ goto allow;
84507+ } else {
84508+ read_unlock(&tasklist_lock);
84509+ rcu_read_unlock();
84510+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
84511+ return 0;
84512+ }
84513+ }
84514+ /* creator exited, pid reuse, fall through to next check */
84515+ }
84516+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
84517+ if (unlikely(!have_same_root(current, p))) {
84518+ read_unlock(&tasklist_lock);
84519+ rcu_read_unlock();
84520+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
84521+ return 0;
84522+ }
84523+ }
84524+
84525+allow:
84526+ read_unlock(&tasklist_lock);
84527+ rcu_read_unlock();
84528+#endif
84529+ return 1;
84530+}
84531+
84532+void
84533+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
84534+{
84535+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
84536+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
84537+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
84538+#endif
84539+ return;
84540+}
84541+
84542+int
84543+gr_handle_chroot_mknod(const struct dentry *dentry,
84544+ const struct vfsmount *mnt, const int mode)
84545+{
84546+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
84547+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
84548+ proc_is_chrooted(current)) {
84549+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
84550+ return -EPERM;
84551+ }
84552+#endif
84553+ return 0;
84554+}
84555+
84556+int
84557+gr_handle_chroot_mount(const struct dentry *dentry,
84558+ const struct vfsmount *mnt, const char *dev_name)
84559+{
84560+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
84561+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
84562+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
84563+ return -EPERM;
84564+ }
84565+#endif
84566+ return 0;
84567+}
84568+
84569+int
84570+gr_handle_chroot_pivot(void)
84571+{
84572+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
84573+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
84574+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
84575+ return -EPERM;
84576+ }
84577+#endif
84578+ return 0;
84579+}
84580+
84581+int
84582+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
84583+{
84584+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
84585+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
84586+ !gr_is_outside_chroot(dentry, mnt)) {
84587+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
84588+ return -EPERM;
84589+ }
84590+#endif
84591+ return 0;
84592+}
84593+
84594+extern const char *captab_log[];
84595+extern int captab_log_entries;
84596+
84597+int
84598+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
84599+{
84600+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84601+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
84602+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
84603+ if (cap_raised(chroot_caps, cap)) {
84604+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
84605+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
84606+ }
84607+ return 0;
84608+ }
84609+ }
84610+#endif
84611+ return 1;
84612+}
84613+
84614+int
84615+gr_chroot_is_capable(const int cap)
84616+{
84617+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84618+ return gr_task_chroot_is_capable(current, current_cred(), cap);
84619+#endif
84620+ return 1;
84621+}
84622+
84623+int
84624+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
84625+{
84626+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84627+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
84628+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
84629+ if (cap_raised(chroot_caps, cap)) {
84630+ return 0;
84631+ }
84632+ }
84633+#endif
84634+ return 1;
84635+}
84636+
84637+int
84638+gr_chroot_is_capable_nolog(const int cap)
84639+{
84640+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84641+ return gr_task_chroot_is_capable_nolog(current, cap);
84642+#endif
84643+ return 1;
84644+}
84645+
84646+int
84647+gr_handle_chroot_sysctl(const int op)
84648+{
84649+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
84650+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
84651+ proc_is_chrooted(current))
84652+ return -EACCES;
84653+#endif
84654+ return 0;
84655+}
84656+
84657+void
84658+gr_handle_chroot_chdir(const struct path *path)
84659+{
84660+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
84661+ if (grsec_enable_chroot_chdir)
84662+ set_fs_pwd(current->fs, path);
84663+#endif
84664+ return;
84665+}
84666+
84667+int
84668+gr_handle_chroot_chmod(const struct dentry *dentry,
84669+ const struct vfsmount *mnt, const int mode)
84670+{
84671+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
84672+ /* allow chmod +s on directories, but not files */
84673+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
84674+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
84675+ proc_is_chrooted(current)) {
84676+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
84677+ return -EPERM;
84678+ }
84679+#endif
84680+ return 0;
84681+}
84682diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
84683new file mode 100644
84684index 0000000..946f750
84685--- /dev/null
84686+++ b/grsecurity/grsec_disabled.c
84687@@ -0,0 +1,445 @@
84688+#include <linux/kernel.h>
84689+#include <linux/module.h>
84690+#include <linux/sched.h>
84691+#include <linux/file.h>
84692+#include <linux/fs.h>
84693+#include <linux/kdev_t.h>
84694+#include <linux/net.h>
84695+#include <linux/in.h>
84696+#include <linux/ip.h>
84697+#include <linux/skbuff.h>
84698+#include <linux/sysctl.h>
84699+
84700+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84701+void
84702+pax_set_initial_flags(struct linux_binprm *bprm)
84703+{
84704+ return;
84705+}
84706+#endif
84707+
84708+#ifdef CONFIG_SYSCTL
84709+__u32
84710+gr_handle_sysctl(const struct ctl_table * table, const int op)
84711+{
84712+ return 0;
84713+}
84714+#endif
84715+
84716+#ifdef CONFIG_TASKSTATS
84717+int gr_is_taskstats_denied(int pid)
84718+{
84719+ return 0;
84720+}
84721+#endif
84722+
84723+int
84724+gr_acl_is_enabled(void)
84725+{
84726+ return 0;
84727+}
84728+
84729+int
84730+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
84731+{
84732+ return 0;
84733+}
84734+
84735+void
84736+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
84737+{
84738+ return;
84739+}
84740+
84741+int
84742+gr_handle_rawio(const struct inode *inode)
84743+{
84744+ return 0;
84745+}
84746+
84747+void
84748+gr_acl_handle_psacct(struct task_struct *task, const long code)
84749+{
84750+ return;
84751+}
84752+
84753+int
84754+gr_handle_ptrace(struct task_struct *task, const long request)
84755+{
84756+ return 0;
84757+}
84758+
84759+int
84760+gr_handle_proc_ptrace(struct task_struct *task)
84761+{
84762+ return 0;
84763+}
84764+
84765+int
84766+gr_set_acls(const int type)
84767+{
84768+ return 0;
84769+}
84770+
84771+int
84772+gr_check_hidden_task(const struct task_struct *tsk)
84773+{
84774+ return 0;
84775+}
84776+
84777+int
84778+gr_check_protected_task(const struct task_struct *task)
84779+{
84780+ return 0;
84781+}
84782+
84783+int
84784+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
84785+{
84786+ return 0;
84787+}
84788+
84789+void
84790+gr_copy_label(struct task_struct *tsk)
84791+{
84792+ return;
84793+}
84794+
84795+void
84796+gr_set_pax_flags(struct task_struct *task)
84797+{
84798+ return;
84799+}
84800+
84801+int
84802+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
84803+ const int unsafe_share)
84804+{
84805+ return 0;
84806+}
84807+
84808+void
84809+gr_handle_delete(const u64 ino, const dev_t dev)
84810+{
84811+ return;
84812+}
84813+
84814+void
84815+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
84816+{
84817+ return;
84818+}
84819+
84820+void
84821+gr_handle_crash(struct task_struct *task, const int sig)
84822+{
84823+ return;
84824+}
84825+
84826+int
84827+gr_check_crash_exec(const struct file *filp)
84828+{
84829+ return 0;
84830+}
84831+
84832+int
84833+gr_check_crash_uid(const kuid_t uid)
84834+{
84835+ return 0;
84836+}
84837+
84838+void
84839+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
84840+ struct dentry *old_dentry,
84841+ struct dentry *new_dentry,
84842+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
84843+{
84844+ return;
84845+}
84846+
84847+int
84848+gr_search_socket(const int family, const int type, const int protocol)
84849+{
84850+ return 1;
84851+}
84852+
84853+int
84854+gr_search_connectbind(const int mode, const struct socket *sock,
84855+ const struct sockaddr_in *addr)
84856+{
84857+ return 0;
84858+}
84859+
84860+void
84861+gr_handle_alertkill(struct task_struct *task)
84862+{
84863+ return;
84864+}
84865+
84866+__u32
84867+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
84868+{
84869+ return 1;
84870+}
84871+
84872+__u32
84873+gr_acl_handle_hidden_file(const struct dentry * dentry,
84874+ const struct vfsmount * mnt)
84875+{
84876+ return 1;
84877+}
84878+
84879+__u32
84880+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
84881+ int acc_mode)
84882+{
84883+ return 1;
84884+}
84885+
84886+__u32
84887+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
84888+{
84889+ return 1;
84890+}
84891+
84892+__u32
84893+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
84894+{
84895+ return 1;
84896+}
84897+
84898+int
84899+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
84900+ unsigned int *vm_flags)
84901+{
84902+ return 1;
84903+}
84904+
84905+__u32
84906+gr_acl_handle_truncate(const struct dentry * dentry,
84907+ const struct vfsmount * mnt)
84908+{
84909+ return 1;
84910+}
84911+
84912+__u32
84913+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
84914+{
84915+ return 1;
84916+}
84917+
84918+__u32
84919+gr_acl_handle_access(const struct dentry * dentry,
84920+ const struct vfsmount * mnt, const int fmode)
84921+{
84922+ return 1;
84923+}
84924+
84925+__u32
84926+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
84927+ umode_t *mode)
84928+{
84929+ return 1;
84930+}
84931+
84932+__u32
84933+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
84934+{
84935+ return 1;
84936+}
84937+
84938+__u32
84939+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
84940+{
84941+ return 1;
84942+}
84943+
84944+__u32
84945+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
84946+{
84947+ return 1;
84948+}
84949+
84950+void
84951+grsecurity_init(void)
84952+{
84953+ return;
84954+}
84955+
84956+umode_t gr_acl_umask(void)
84957+{
84958+ return 0;
84959+}
84960+
84961+__u32
84962+gr_acl_handle_mknod(const struct dentry * new_dentry,
84963+ const struct dentry * parent_dentry,
84964+ const struct vfsmount * parent_mnt,
84965+ const int mode)
84966+{
84967+ return 1;
84968+}
84969+
84970+__u32
84971+gr_acl_handle_mkdir(const struct dentry * new_dentry,
84972+ const struct dentry * parent_dentry,
84973+ const struct vfsmount * parent_mnt)
84974+{
84975+ return 1;
84976+}
84977+
84978+__u32
84979+gr_acl_handle_symlink(const struct dentry * new_dentry,
84980+ const struct dentry * parent_dentry,
84981+ const struct vfsmount * parent_mnt, const struct filename *from)
84982+{
84983+ return 1;
84984+}
84985+
84986+__u32
84987+gr_acl_handle_link(const struct dentry * new_dentry,
84988+ const struct dentry * parent_dentry,
84989+ const struct vfsmount * parent_mnt,
84990+ const struct dentry * old_dentry,
84991+ const struct vfsmount * old_mnt, const struct filename *to)
84992+{
84993+ return 1;
84994+}
84995+
84996+int
84997+gr_acl_handle_rename(const struct dentry *new_dentry,
84998+ const struct dentry *parent_dentry,
84999+ const struct vfsmount *parent_mnt,
85000+ const struct dentry *old_dentry,
85001+ const struct inode *old_parent_inode,
85002+ const struct vfsmount *old_mnt, const struct filename *newname,
85003+ unsigned int flags)
85004+{
85005+ return 0;
85006+}
85007+
85008+int
85009+gr_acl_handle_filldir(const struct file *file, const char *name,
85010+ const int namelen, const u64 ino)
85011+{
85012+ return 1;
85013+}
85014+
85015+int
85016+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
85017+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
85018+{
85019+ return 1;
85020+}
85021+
85022+int
85023+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
85024+{
85025+ return 0;
85026+}
85027+
85028+int
85029+gr_search_accept(const struct socket *sock)
85030+{
85031+ return 0;
85032+}
85033+
85034+int
85035+gr_search_listen(const struct socket *sock)
85036+{
85037+ return 0;
85038+}
85039+
85040+int
85041+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
85042+{
85043+ return 0;
85044+}
85045+
85046+__u32
85047+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
85048+{
85049+ return 1;
85050+}
85051+
85052+__u32
85053+gr_acl_handle_creat(const struct dentry * dentry,
85054+ const struct dentry * p_dentry,
85055+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
85056+ const int imode)
85057+{
85058+ return 1;
85059+}
85060+
85061+void
85062+gr_acl_handle_exit(void)
85063+{
85064+ return;
85065+}
85066+
85067+int
85068+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
85069+{
85070+ return 1;
85071+}
85072+
85073+void
85074+gr_set_role_label(const kuid_t uid, const kgid_t gid)
85075+{
85076+ return;
85077+}
85078+
85079+int
85080+gr_acl_handle_procpidmem(const struct task_struct *task)
85081+{
85082+ return 0;
85083+}
85084+
85085+int
85086+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
85087+{
85088+ return 0;
85089+}
85090+
85091+int
85092+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
85093+{
85094+ return 0;
85095+}
85096+
85097+int
85098+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
85099+{
85100+ return 0;
85101+}
85102+
85103+int
85104+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
85105+{
85106+ return 0;
85107+}
85108+
85109+int gr_acl_enable_at_secure(void)
85110+{
85111+ return 0;
85112+}
85113+
85114+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
85115+{
85116+ return dentry->d_sb->s_dev;
85117+}
85118+
85119+u64 gr_get_ino_from_dentry(struct dentry *dentry)
85120+{
85121+ return dentry->d_inode->i_ino;
85122+}
85123+
85124+void gr_put_exec_file(struct task_struct *task)
85125+{
85126+ return;
85127+}
85128+
85129+#ifdef CONFIG_SECURITY
85130+EXPORT_SYMBOL_GPL(gr_check_user_change);
85131+EXPORT_SYMBOL_GPL(gr_check_group_change);
85132+#endif
85133diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
85134new file mode 100644
85135index 0000000..fb7531e
85136--- /dev/null
85137+++ b/grsecurity/grsec_exec.c
85138@@ -0,0 +1,189 @@
85139+#include <linux/kernel.h>
85140+#include <linux/sched.h>
85141+#include <linux/file.h>
85142+#include <linux/binfmts.h>
85143+#include <linux/fs.h>
85144+#include <linux/types.h>
85145+#include <linux/grdefs.h>
85146+#include <linux/grsecurity.h>
85147+#include <linux/grinternal.h>
85148+#include <linux/capability.h>
85149+#include <linux/module.h>
85150+#include <linux/compat.h>
85151+
85152+#include <asm/uaccess.h>
85153+
85154+#ifdef CONFIG_GRKERNSEC_EXECLOG
85155+static char gr_exec_arg_buf[132];
85156+static DEFINE_MUTEX(gr_exec_arg_mutex);
85157+#endif
85158+
85159+struct user_arg_ptr {
85160+#ifdef CONFIG_COMPAT
85161+ bool is_compat;
85162+#endif
85163+ union {
85164+ const char __user *const __user *native;
85165+#ifdef CONFIG_COMPAT
85166+ const compat_uptr_t __user *compat;
85167+#endif
85168+ } ptr;
85169+};
85170+
85171+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
85172+
85173+void
85174+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
85175+{
85176+#ifdef CONFIG_GRKERNSEC_EXECLOG
85177+ char *grarg = gr_exec_arg_buf;
85178+ unsigned int i, x, execlen = 0;
85179+ char c;
85180+
85181+ if (!((grsec_enable_execlog && grsec_enable_group &&
85182+ in_group_p(grsec_audit_gid))
85183+ || (grsec_enable_execlog && !grsec_enable_group)))
85184+ return;
85185+
85186+ mutex_lock(&gr_exec_arg_mutex);
85187+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
85188+
85189+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
85190+ const char __user *p;
85191+ unsigned int len;
85192+
85193+ p = get_user_arg_ptr(argv, i);
85194+ if (IS_ERR(p))
85195+ goto log;
85196+
85197+ len = strnlen_user(p, 128 - execlen);
85198+ if (len > 128 - execlen)
85199+ len = 128 - execlen;
85200+ else if (len > 0)
85201+ len--;
85202+ if (copy_from_user(grarg + execlen, p, len))
85203+ goto log;
85204+
85205+ /* rewrite unprintable characters */
85206+ for (x = 0; x < len; x++) {
85207+ c = *(grarg + execlen + x);
85208+ if (c < 32 || c > 126)
85209+ *(grarg + execlen + x) = ' ';
85210+ }
85211+
85212+ execlen += len;
85213+ *(grarg + execlen) = ' ';
85214+ *(grarg + execlen + 1) = '\0';
85215+ execlen++;
85216+ }
85217+
85218+ log:
85219+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
85220+ bprm->file->f_path.mnt, grarg);
85221+ mutex_unlock(&gr_exec_arg_mutex);
85222+#endif
85223+ return;
85224+}
85225+
85226+#ifdef CONFIG_GRKERNSEC
85227+extern int gr_acl_is_capable(const int cap);
85228+extern int gr_acl_is_capable_nolog(const int cap);
85229+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
85230+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
85231+extern int gr_chroot_is_capable(const int cap);
85232+extern int gr_chroot_is_capable_nolog(const int cap);
85233+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
85234+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
85235+#endif
85236+
85237+const char *captab_log[] = {
85238+ "CAP_CHOWN",
85239+ "CAP_DAC_OVERRIDE",
85240+ "CAP_DAC_READ_SEARCH",
85241+ "CAP_FOWNER",
85242+ "CAP_FSETID",
85243+ "CAP_KILL",
85244+ "CAP_SETGID",
85245+ "CAP_SETUID",
85246+ "CAP_SETPCAP",
85247+ "CAP_LINUX_IMMUTABLE",
85248+ "CAP_NET_BIND_SERVICE",
85249+ "CAP_NET_BROADCAST",
85250+ "CAP_NET_ADMIN",
85251+ "CAP_NET_RAW",
85252+ "CAP_IPC_LOCK",
85253+ "CAP_IPC_OWNER",
85254+ "CAP_SYS_MODULE",
85255+ "CAP_SYS_RAWIO",
85256+ "CAP_SYS_CHROOT",
85257+ "CAP_SYS_PTRACE",
85258+ "CAP_SYS_PACCT",
85259+ "CAP_SYS_ADMIN",
85260+ "CAP_SYS_BOOT",
85261+ "CAP_SYS_NICE",
85262+ "CAP_SYS_RESOURCE",
85263+ "CAP_SYS_TIME",
85264+ "CAP_SYS_TTY_CONFIG",
85265+ "CAP_MKNOD",
85266+ "CAP_LEASE",
85267+ "CAP_AUDIT_WRITE",
85268+ "CAP_AUDIT_CONTROL",
85269+ "CAP_SETFCAP",
85270+ "CAP_MAC_OVERRIDE",
85271+ "CAP_MAC_ADMIN",
85272+ "CAP_SYSLOG",
85273+ "CAP_WAKE_ALARM",
85274+ "CAP_BLOCK_SUSPEND",
85275+ "CAP_AUDIT_READ"
85276+};
85277+
85278+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
85279+
85280+int gr_is_capable(const int cap)
85281+{
85282+#ifdef CONFIG_GRKERNSEC
85283+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
85284+ return 1;
85285+ return 0;
85286+#else
85287+ return 1;
85288+#endif
85289+}
85290+
85291+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
85292+{
85293+#ifdef CONFIG_GRKERNSEC
85294+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
85295+ return 1;
85296+ return 0;
85297+#else
85298+ return 1;
85299+#endif
85300+}
85301+
85302+int gr_is_capable_nolog(const int cap)
85303+{
85304+#ifdef CONFIG_GRKERNSEC
85305+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
85306+ return 1;
85307+ return 0;
85308+#else
85309+ return 1;
85310+#endif
85311+}
85312+
85313+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
85314+{
85315+#ifdef CONFIG_GRKERNSEC
85316+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
85317+ return 1;
85318+ return 0;
85319+#else
85320+ return 1;
85321+#endif
85322+}
85323+
85324+EXPORT_SYMBOL_GPL(gr_is_capable);
85325+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
85326+EXPORT_SYMBOL_GPL(gr_task_is_capable);
85327+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
85328diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
85329new file mode 100644
85330index 0000000..06cc6ea
85331--- /dev/null
85332+++ b/grsecurity/grsec_fifo.c
85333@@ -0,0 +1,24 @@
85334+#include <linux/kernel.h>
85335+#include <linux/sched.h>
85336+#include <linux/fs.h>
85337+#include <linux/file.h>
85338+#include <linux/grinternal.h>
85339+
85340+int
85341+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
85342+ const struct dentry *dir, const int flag, const int acc_mode)
85343+{
85344+#ifdef CONFIG_GRKERNSEC_FIFO
85345+ const struct cred *cred = current_cred();
85346+
85347+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
85348+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
85349+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
85350+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
85351+ if (!inode_permission(dentry->d_inode, acc_mode))
85352+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
85353+ return -EACCES;
85354+ }
85355+#endif
85356+ return 0;
85357+}
85358diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
85359new file mode 100644
85360index 0000000..8ca18bf
85361--- /dev/null
85362+++ b/grsecurity/grsec_fork.c
85363@@ -0,0 +1,23 @@
85364+#include <linux/kernel.h>
85365+#include <linux/sched.h>
85366+#include <linux/grsecurity.h>
85367+#include <linux/grinternal.h>
85368+#include <linux/errno.h>
85369+
85370+void
85371+gr_log_forkfail(const int retval)
85372+{
85373+#ifdef CONFIG_GRKERNSEC_FORKFAIL
85374+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
85375+ switch (retval) {
85376+ case -EAGAIN:
85377+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
85378+ break;
85379+ case -ENOMEM:
85380+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
85381+ break;
85382+ }
85383+ }
85384+#endif
85385+ return;
85386+}
85387diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
85388new file mode 100644
85389index 0000000..4ed9e7d
85390--- /dev/null
85391+++ b/grsecurity/grsec_init.c
85392@@ -0,0 +1,290 @@
85393+#include <linux/kernel.h>
85394+#include <linux/sched.h>
85395+#include <linux/mm.h>
85396+#include <linux/gracl.h>
85397+#include <linux/slab.h>
85398+#include <linux/vmalloc.h>
85399+#include <linux/percpu.h>
85400+#include <linux/module.h>
85401+
85402+int grsec_enable_ptrace_readexec;
85403+int grsec_enable_setxid;
85404+int grsec_enable_symlinkown;
85405+kgid_t grsec_symlinkown_gid;
85406+int grsec_enable_brute;
85407+int grsec_enable_link;
85408+int grsec_enable_dmesg;
85409+int grsec_enable_harden_ptrace;
85410+int grsec_enable_harden_ipc;
85411+int grsec_enable_fifo;
85412+int grsec_enable_execlog;
85413+int grsec_enable_signal;
85414+int grsec_enable_forkfail;
85415+int grsec_enable_audit_ptrace;
85416+int grsec_enable_time;
85417+int grsec_enable_group;
85418+kgid_t grsec_audit_gid;
85419+int grsec_enable_chdir;
85420+int grsec_enable_mount;
85421+int grsec_enable_rofs;
85422+int grsec_deny_new_usb;
85423+int grsec_enable_chroot_findtask;
85424+int grsec_enable_chroot_mount;
85425+int grsec_enable_chroot_shmat;
85426+int grsec_enable_chroot_fchdir;
85427+int grsec_enable_chroot_double;
85428+int grsec_enable_chroot_pivot;
85429+int grsec_enable_chroot_chdir;
85430+int grsec_enable_chroot_chmod;
85431+int grsec_enable_chroot_mknod;
85432+int grsec_enable_chroot_nice;
85433+int grsec_enable_chroot_execlog;
85434+int grsec_enable_chroot_caps;
85435+int grsec_enable_chroot_rename;
85436+int grsec_enable_chroot_sysctl;
85437+int grsec_enable_chroot_unix;
85438+int grsec_enable_tpe;
85439+kgid_t grsec_tpe_gid;
85440+int grsec_enable_blackhole;
85441+#ifdef CONFIG_IPV6_MODULE
85442+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
85443+#endif
85444+int grsec_lastack_retries;
85445+int grsec_enable_tpe_all;
85446+int grsec_enable_tpe_invert;
85447+int grsec_enable_socket_all;
85448+kgid_t grsec_socket_all_gid;
85449+int grsec_enable_socket_client;
85450+kgid_t grsec_socket_client_gid;
85451+int grsec_enable_socket_server;
85452+kgid_t grsec_socket_server_gid;
85453+int grsec_resource_logging;
85454+int grsec_disable_privio;
85455+int grsec_enable_log_rwxmaps;
85456+int grsec_lock;
85457+
85458+DEFINE_SPINLOCK(grsec_alert_lock);
85459+unsigned long grsec_alert_wtime = 0;
85460+unsigned long grsec_alert_fyet = 0;
85461+
85462+DEFINE_SPINLOCK(grsec_audit_lock);
85463+
85464+DEFINE_RWLOCK(grsec_exec_file_lock);
85465+
85466+char *gr_shared_page[4];
85467+
85468+char *gr_alert_log_fmt;
85469+char *gr_audit_log_fmt;
85470+char *gr_alert_log_buf;
85471+char *gr_audit_log_buf;
85472+
85473+extern struct gr_arg *gr_usermode;
85474+extern unsigned char *gr_system_salt;
85475+extern unsigned char *gr_system_sum;
85476+
85477+void __init
85478+grsecurity_init(void)
85479+{
85480+ int j;
85481+ /* create the per-cpu shared pages */
85482+
85483+#ifdef CONFIG_X86
85484+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
85485+#endif
85486+
85487+ for (j = 0; j < 4; j++) {
85488+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
85489+ if (gr_shared_page[j] == NULL) {
85490+ panic("Unable to allocate grsecurity shared page");
85491+ return;
85492+ }
85493+ }
85494+
85495+ /* allocate log buffers */
85496+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
85497+ if (!gr_alert_log_fmt) {
85498+ panic("Unable to allocate grsecurity alert log format buffer");
85499+ return;
85500+ }
85501+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
85502+ if (!gr_audit_log_fmt) {
85503+ panic("Unable to allocate grsecurity audit log format buffer");
85504+ return;
85505+ }
85506+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
85507+ if (!gr_alert_log_buf) {
85508+ panic("Unable to allocate grsecurity alert log buffer");
85509+ return;
85510+ }
85511+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
85512+ if (!gr_audit_log_buf) {
85513+ panic("Unable to allocate grsecurity audit log buffer");
85514+ return;
85515+ }
85516+
85517+ /* allocate memory for authentication structure */
85518+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
85519+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
85520+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
85521+
85522+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
85523+ panic("Unable to allocate grsecurity authentication structure");
85524+ return;
85525+ }
85526+
85527+#ifdef CONFIG_GRKERNSEC_IO
85528+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
85529+ grsec_disable_privio = 1;
85530+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
85531+ grsec_disable_privio = 1;
85532+#else
85533+ grsec_disable_privio = 0;
85534+#endif
85535+#endif
85536+
85537+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
85538+ /* for backward compatibility, tpe_invert always defaults to on if
85539+ enabled in the kernel
85540+ */
85541+ grsec_enable_tpe_invert = 1;
85542+#endif
85543+
85544+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
85545+#ifndef CONFIG_GRKERNSEC_SYSCTL
85546+ grsec_lock = 1;
85547+#endif
85548+
85549+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
85550+ grsec_enable_log_rwxmaps = 1;
85551+#endif
85552+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
85553+ grsec_enable_group = 1;
85554+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
85555+#endif
85556+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
85557+ grsec_enable_ptrace_readexec = 1;
85558+#endif
85559+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
85560+ grsec_enable_chdir = 1;
85561+#endif
85562+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
85563+ grsec_enable_harden_ptrace = 1;
85564+#endif
85565+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
85566+ grsec_enable_harden_ipc = 1;
85567+#endif
85568+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
85569+ grsec_enable_mount = 1;
85570+#endif
85571+#ifdef CONFIG_GRKERNSEC_LINK
85572+ grsec_enable_link = 1;
85573+#endif
85574+#ifdef CONFIG_GRKERNSEC_BRUTE
85575+ grsec_enable_brute = 1;
85576+#endif
85577+#ifdef CONFIG_GRKERNSEC_DMESG
85578+ grsec_enable_dmesg = 1;
85579+#endif
85580+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85581+ grsec_enable_blackhole = 1;
85582+ grsec_lastack_retries = 4;
85583+#endif
85584+#ifdef CONFIG_GRKERNSEC_FIFO
85585+ grsec_enable_fifo = 1;
85586+#endif
85587+#ifdef CONFIG_GRKERNSEC_EXECLOG
85588+ grsec_enable_execlog = 1;
85589+#endif
85590+#ifdef CONFIG_GRKERNSEC_SETXID
85591+ grsec_enable_setxid = 1;
85592+#endif
85593+#ifdef CONFIG_GRKERNSEC_SIGNAL
85594+ grsec_enable_signal = 1;
85595+#endif
85596+#ifdef CONFIG_GRKERNSEC_FORKFAIL
85597+ grsec_enable_forkfail = 1;
85598+#endif
85599+#ifdef CONFIG_GRKERNSEC_TIME
85600+ grsec_enable_time = 1;
85601+#endif
85602+#ifdef CONFIG_GRKERNSEC_RESLOG
85603+ grsec_resource_logging = 1;
85604+#endif
85605+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
85606+ grsec_enable_chroot_findtask = 1;
85607+#endif
85608+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
85609+ grsec_enable_chroot_unix = 1;
85610+#endif
85611+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
85612+ grsec_enable_chroot_mount = 1;
85613+#endif
85614+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
85615+ grsec_enable_chroot_fchdir = 1;
85616+#endif
85617+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
85618+ grsec_enable_chroot_shmat = 1;
85619+#endif
85620+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
85621+ grsec_enable_audit_ptrace = 1;
85622+#endif
85623+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
85624+ grsec_enable_chroot_double = 1;
85625+#endif
85626+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
85627+ grsec_enable_chroot_pivot = 1;
85628+#endif
85629+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
85630+ grsec_enable_chroot_chdir = 1;
85631+#endif
85632+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
85633+ grsec_enable_chroot_chmod = 1;
85634+#endif
85635+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
85636+ grsec_enable_chroot_mknod = 1;
85637+#endif
85638+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
85639+ grsec_enable_chroot_nice = 1;
85640+#endif
85641+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
85642+ grsec_enable_chroot_execlog = 1;
85643+#endif
85644+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
85645+ grsec_enable_chroot_caps = 1;
85646+#endif
85647+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
85648+ grsec_enable_chroot_rename = 1;
85649+#endif
85650+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
85651+ grsec_enable_chroot_sysctl = 1;
85652+#endif
85653+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
85654+ grsec_enable_symlinkown = 1;
85655+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
85656+#endif
85657+#ifdef CONFIG_GRKERNSEC_TPE
85658+ grsec_enable_tpe = 1;
85659+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
85660+#ifdef CONFIG_GRKERNSEC_TPE_ALL
85661+ grsec_enable_tpe_all = 1;
85662+#endif
85663+#endif
85664+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
85665+ grsec_enable_socket_all = 1;
85666+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
85667+#endif
85668+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
85669+ grsec_enable_socket_client = 1;
85670+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
85671+#endif
85672+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
85673+ grsec_enable_socket_server = 1;
85674+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
85675+#endif
85676+#endif
85677+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
85678+ grsec_deny_new_usb = 1;
85679+#endif
85680+
85681+ return;
85682+}
85683diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
85684new file mode 100644
85685index 0000000..1773300
85686--- /dev/null
85687+++ b/grsecurity/grsec_ipc.c
85688@@ -0,0 +1,48 @@
85689+#include <linux/kernel.h>
85690+#include <linux/mm.h>
85691+#include <linux/sched.h>
85692+#include <linux/file.h>
85693+#include <linux/ipc.h>
85694+#include <linux/ipc_namespace.h>
85695+#include <linux/grsecurity.h>
85696+#include <linux/grinternal.h>
85697+
85698+int
85699+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
85700+{
85701+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
85702+ int write;
85703+ int orig_granted_mode;
85704+ kuid_t euid;
85705+ kgid_t egid;
85706+
85707+ if (!grsec_enable_harden_ipc)
85708+ return 1;
85709+
85710+ euid = current_euid();
85711+ egid = current_egid();
85712+
85713+ write = requested_mode & 00002;
85714+ orig_granted_mode = ipcp->mode;
85715+
85716+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
85717+ orig_granted_mode >>= 6;
85718+ else {
85719+ /* if likely wrong permissions, lock to user */
85720+ if (orig_granted_mode & 0007)
85721+ orig_granted_mode = 0;
85722+ /* otherwise do a egid-only check */
85723+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
85724+ orig_granted_mode >>= 3;
85725+ /* otherwise, no access */
85726+ else
85727+ orig_granted_mode = 0;
85728+ }
85729+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
85730+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
85731+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
85732+ return 0;
85733+ }
85734+#endif
85735+ return 1;
85736+}
85737diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
85738new file mode 100644
85739index 0000000..5e05e20
85740--- /dev/null
85741+++ b/grsecurity/grsec_link.c
85742@@ -0,0 +1,58 @@
85743+#include <linux/kernel.h>
85744+#include <linux/sched.h>
85745+#include <linux/fs.h>
85746+#include <linux/file.h>
85747+#include <linux/grinternal.h>
85748+
85749+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
85750+{
85751+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
85752+ const struct inode *link_inode = link->dentry->d_inode;
85753+
85754+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
85755+ /* ignore root-owned links, e.g. /proc/self */
85756+ gr_is_global_nonroot(link_inode->i_uid) && target &&
85757+ !uid_eq(link_inode->i_uid, target->i_uid)) {
85758+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
85759+ return 1;
85760+ }
85761+#endif
85762+ return 0;
85763+}
85764+
85765+int
85766+gr_handle_follow_link(const struct inode *parent,
85767+ const struct inode *inode,
85768+ const struct dentry *dentry, const struct vfsmount *mnt)
85769+{
85770+#ifdef CONFIG_GRKERNSEC_LINK
85771+ const struct cred *cred = current_cred();
85772+
85773+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
85774+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
85775+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
85776+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
85777+ return -EACCES;
85778+ }
85779+#endif
85780+ return 0;
85781+}
85782+
85783+int
85784+gr_handle_hardlink(const struct dentry *dentry,
85785+ const struct vfsmount *mnt,
85786+ struct inode *inode, const int mode, const struct filename *to)
85787+{
85788+#ifdef CONFIG_GRKERNSEC_LINK
85789+ const struct cred *cred = current_cred();
85790+
85791+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
85792+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
85793+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
85794+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
85795+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
85796+ return -EPERM;
85797+ }
85798+#endif
85799+ return 0;
85800+}
85801diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
85802new file mode 100644
85803index 0000000..dbe0a6b
85804--- /dev/null
85805+++ b/grsecurity/grsec_log.c
85806@@ -0,0 +1,341 @@
85807+#include <linux/kernel.h>
85808+#include <linux/sched.h>
85809+#include <linux/file.h>
85810+#include <linux/tty.h>
85811+#include <linux/fs.h>
85812+#include <linux/mm.h>
85813+#include <linux/grinternal.h>
85814+
85815+#ifdef CONFIG_TREE_PREEMPT_RCU
85816+#define DISABLE_PREEMPT() preempt_disable()
85817+#define ENABLE_PREEMPT() preempt_enable()
85818+#else
85819+#define DISABLE_PREEMPT()
85820+#define ENABLE_PREEMPT()
85821+#endif
85822+
85823+#define BEGIN_LOCKS(x) \
85824+ DISABLE_PREEMPT(); \
85825+ rcu_read_lock(); \
85826+ read_lock(&tasklist_lock); \
85827+ read_lock(&grsec_exec_file_lock); \
85828+ if (x != GR_DO_AUDIT) \
85829+ spin_lock(&grsec_alert_lock); \
85830+ else \
85831+ spin_lock(&grsec_audit_lock)
85832+
85833+#define END_LOCKS(x) \
85834+ if (x != GR_DO_AUDIT) \
85835+ spin_unlock(&grsec_alert_lock); \
85836+ else \
85837+ spin_unlock(&grsec_audit_lock); \
85838+ read_unlock(&grsec_exec_file_lock); \
85839+ read_unlock(&tasklist_lock); \
85840+ rcu_read_unlock(); \
85841+ ENABLE_PREEMPT(); \
85842+ if (x == GR_DONT_AUDIT) \
85843+ gr_handle_alertkill(current)
85844+
85845+enum {
85846+ FLOODING,
85847+ NO_FLOODING
85848+};
85849+
85850+extern char *gr_alert_log_fmt;
85851+extern char *gr_audit_log_fmt;
85852+extern char *gr_alert_log_buf;
85853+extern char *gr_audit_log_buf;
85854+
85855+static int gr_log_start(int audit)
85856+{
85857+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
85858+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
85859+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85860+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
85861+ unsigned long curr_secs = get_seconds();
85862+
85863+ if (audit == GR_DO_AUDIT)
85864+ goto set_fmt;
85865+
85866+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
85867+ grsec_alert_wtime = curr_secs;
85868+ grsec_alert_fyet = 0;
85869+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
85870+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
85871+ grsec_alert_fyet++;
85872+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
85873+ grsec_alert_wtime = curr_secs;
85874+ grsec_alert_fyet++;
85875+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
85876+ return FLOODING;
85877+ }
85878+ else return FLOODING;
85879+
85880+set_fmt:
85881+#endif
85882+ memset(buf, 0, PAGE_SIZE);
85883+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
85884+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
85885+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
85886+ } else if (current->signal->curr_ip) {
85887+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
85888+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
85889+ } else if (gr_acl_is_enabled()) {
85890+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
85891+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
85892+ } else {
85893+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
85894+ strcpy(buf, fmt);
85895+ }
85896+
85897+ return NO_FLOODING;
85898+}
85899+
85900+static void gr_log_middle(int audit, const char *msg, va_list ap)
85901+ __attribute__ ((format (printf, 2, 0)));
85902+
85903+static void gr_log_middle(int audit, const char *msg, va_list ap)
85904+{
85905+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85906+ unsigned int len = strlen(buf);
85907+
85908+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
85909+
85910+ return;
85911+}
85912+
85913+static void gr_log_middle_varargs(int audit, const char *msg, ...)
85914+ __attribute__ ((format (printf, 2, 3)));
85915+
85916+static void gr_log_middle_varargs(int audit, const char *msg, ...)
85917+{
85918+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85919+ unsigned int len = strlen(buf);
85920+ va_list ap;
85921+
85922+ va_start(ap, msg);
85923+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
85924+ va_end(ap);
85925+
85926+ return;
85927+}
85928+
85929+static void gr_log_end(int audit, int append_default)
85930+{
85931+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85932+ if (append_default) {
85933+ struct task_struct *task = current;
85934+ struct task_struct *parent = task->real_parent;
85935+ const struct cred *cred = __task_cred(task);
85936+ const struct cred *pcred = __task_cred(parent);
85937+ unsigned int len = strlen(buf);
85938+
85939+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
85940+ }
85941+
85942+ printk("%s\n", buf);
85943+
85944+ return;
85945+}
85946+
85947+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
85948+{
85949+ int logtype;
85950+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
85951+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
85952+ void *voidptr = NULL;
85953+ int num1 = 0, num2 = 0;
85954+ unsigned long ulong1 = 0, ulong2 = 0;
85955+ struct dentry *dentry = NULL;
85956+ struct vfsmount *mnt = NULL;
85957+ struct file *file = NULL;
85958+ struct task_struct *task = NULL;
85959+ struct vm_area_struct *vma = NULL;
85960+ const struct cred *cred, *pcred;
85961+ va_list ap;
85962+
85963+ BEGIN_LOCKS(audit);
85964+ logtype = gr_log_start(audit);
85965+ if (logtype == FLOODING) {
85966+ END_LOCKS(audit);
85967+ return;
85968+ }
85969+ va_start(ap, argtypes);
85970+ switch (argtypes) {
85971+ case GR_TTYSNIFF:
85972+ task = va_arg(ap, struct task_struct *);
85973+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
85974+ break;
85975+ case GR_SYSCTL_HIDDEN:
85976+ str1 = va_arg(ap, char *);
85977+ gr_log_middle_varargs(audit, msg, result, str1);
85978+ break;
85979+ case GR_RBAC:
85980+ dentry = va_arg(ap, struct dentry *);
85981+ mnt = va_arg(ap, struct vfsmount *);
85982+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
85983+ break;
85984+ case GR_RBAC_STR:
85985+ dentry = va_arg(ap, struct dentry *);
85986+ mnt = va_arg(ap, struct vfsmount *);
85987+ str1 = va_arg(ap, char *);
85988+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
85989+ break;
85990+ case GR_STR_RBAC:
85991+ str1 = va_arg(ap, char *);
85992+ dentry = va_arg(ap, struct dentry *);
85993+ mnt = va_arg(ap, struct vfsmount *);
85994+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
85995+ break;
85996+ case GR_RBAC_MODE2:
85997+ dentry = va_arg(ap, struct dentry *);
85998+ mnt = va_arg(ap, struct vfsmount *);
85999+ str1 = va_arg(ap, char *);
86000+ str2 = va_arg(ap, char *);
86001+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
86002+ break;
86003+ case GR_RBAC_MODE3:
86004+ dentry = va_arg(ap, struct dentry *);
86005+ mnt = va_arg(ap, struct vfsmount *);
86006+ str1 = va_arg(ap, char *);
86007+ str2 = va_arg(ap, char *);
86008+ str3 = va_arg(ap, char *);
86009+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
86010+ break;
86011+ case GR_FILENAME:
86012+ dentry = va_arg(ap, struct dentry *);
86013+ mnt = va_arg(ap, struct vfsmount *);
86014+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
86015+ break;
86016+ case GR_STR_FILENAME:
86017+ str1 = va_arg(ap, char *);
86018+ dentry = va_arg(ap, struct dentry *);
86019+ mnt = va_arg(ap, struct vfsmount *);
86020+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
86021+ break;
86022+ case GR_FILENAME_STR:
86023+ dentry = va_arg(ap, struct dentry *);
86024+ mnt = va_arg(ap, struct vfsmount *);
86025+ str1 = va_arg(ap, char *);
86026+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
86027+ break;
86028+ case GR_FILENAME_TWO_INT:
86029+ dentry = va_arg(ap, struct dentry *);
86030+ mnt = va_arg(ap, struct vfsmount *);
86031+ num1 = va_arg(ap, int);
86032+ num2 = va_arg(ap, int);
86033+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
86034+ break;
86035+ case GR_FILENAME_TWO_INT_STR:
86036+ dentry = va_arg(ap, struct dentry *);
86037+ mnt = va_arg(ap, struct vfsmount *);
86038+ num1 = va_arg(ap, int);
86039+ num2 = va_arg(ap, int);
86040+ str1 = va_arg(ap, char *);
86041+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
86042+ break;
86043+ case GR_TEXTREL:
86044+ file = va_arg(ap, struct file *);
86045+ ulong1 = va_arg(ap, unsigned long);
86046+ ulong2 = va_arg(ap, unsigned long);
86047+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
86048+ break;
86049+ case GR_PTRACE:
86050+ task = va_arg(ap, struct task_struct *);
86051+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
86052+ break;
86053+ case GR_RESOURCE:
86054+ task = va_arg(ap, struct task_struct *);
86055+ cred = __task_cred(task);
86056+ pcred = __task_cred(task->real_parent);
86057+ ulong1 = va_arg(ap, unsigned long);
86058+ str1 = va_arg(ap, char *);
86059+ ulong2 = va_arg(ap, unsigned long);
86060+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
86061+ break;
86062+ case GR_CAP:
86063+ task = va_arg(ap, struct task_struct *);
86064+ cred = __task_cred(task);
86065+ pcred = __task_cred(task->real_parent);
86066+ str1 = va_arg(ap, char *);
86067+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
86068+ break;
86069+ case GR_SIG:
86070+ str1 = va_arg(ap, char *);
86071+ voidptr = va_arg(ap, void *);
86072+ gr_log_middle_varargs(audit, msg, str1, voidptr);
86073+ break;
86074+ case GR_SIG2:
86075+ task = va_arg(ap, struct task_struct *);
86076+ cred = __task_cred(task);
86077+ pcred = __task_cred(task->real_parent);
86078+ num1 = va_arg(ap, int);
86079+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
86080+ break;
86081+ case GR_CRASH1:
86082+ task = va_arg(ap, struct task_struct *);
86083+ cred = __task_cred(task);
86084+ pcred = __task_cred(task->real_parent);
86085+ ulong1 = va_arg(ap, unsigned long);
86086+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
86087+ break;
86088+ case GR_CRASH2:
86089+ task = va_arg(ap, struct task_struct *);
86090+ cred = __task_cred(task);
86091+ pcred = __task_cred(task->real_parent);
86092+ ulong1 = va_arg(ap, unsigned long);
86093+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
86094+ break;
86095+ case GR_RWXMAP:
86096+ file = va_arg(ap, struct file *);
86097+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
86098+ break;
86099+ case GR_RWXMAPVMA:
86100+ vma = va_arg(ap, struct vm_area_struct *);
86101+ if (vma->vm_file)
86102+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
86103+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
86104+ str1 = "<stack>";
86105+ else if (vma->vm_start <= current->mm->brk &&
86106+ vma->vm_end >= current->mm->start_brk)
86107+ str1 = "<heap>";
86108+ else
86109+ str1 = "<anonymous mapping>";
86110+ gr_log_middle_varargs(audit, msg, str1);
86111+ break;
86112+ case GR_PSACCT:
86113+ {
86114+ unsigned int wday, cday;
86115+ __u8 whr, chr;
86116+ __u8 wmin, cmin;
86117+ __u8 wsec, csec;
86118+ char cur_tty[64] = { 0 };
86119+ char parent_tty[64] = { 0 };
86120+
86121+ task = va_arg(ap, struct task_struct *);
86122+ wday = va_arg(ap, unsigned int);
86123+ cday = va_arg(ap, unsigned int);
86124+ whr = va_arg(ap, int);
86125+ chr = va_arg(ap, int);
86126+ wmin = va_arg(ap, int);
86127+ cmin = va_arg(ap, int);
86128+ wsec = va_arg(ap, int);
86129+ csec = va_arg(ap, int);
86130+ ulong1 = va_arg(ap, unsigned long);
86131+ cred = __task_cred(task);
86132+ pcred = __task_cred(task->real_parent);
86133+
86134+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
86135+ }
86136+ break;
86137+ default:
86138+ gr_log_middle(audit, msg, ap);
86139+ }
86140+ va_end(ap);
86141+ // these don't need DEFAULTSECARGS printed on the end
86142+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
86143+ gr_log_end(audit, 0);
86144+ else
86145+ gr_log_end(audit, 1);
86146+ END_LOCKS(audit);
86147+}
86148diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
86149new file mode 100644
86150index 0000000..0e39d8c
86151--- /dev/null
86152+++ b/grsecurity/grsec_mem.c
86153@@ -0,0 +1,48 @@
86154+#include <linux/kernel.h>
86155+#include <linux/sched.h>
86156+#include <linux/mm.h>
86157+#include <linux/mman.h>
86158+#include <linux/module.h>
86159+#include <linux/grinternal.h>
86160+
86161+void gr_handle_msr_write(void)
86162+{
86163+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
86164+ return;
86165+}
86166+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
86167+
86168+void
86169+gr_handle_ioperm(void)
86170+{
86171+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
86172+ return;
86173+}
86174+
86175+void
86176+gr_handle_iopl(void)
86177+{
86178+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
86179+ return;
86180+}
86181+
86182+void
86183+gr_handle_mem_readwrite(u64 from, u64 to)
86184+{
86185+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
86186+ return;
86187+}
86188+
86189+void
86190+gr_handle_vm86(void)
86191+{
86192+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
86193+ return;
86194+}
86195+
86196+void
86197+gr_log_badprocpid(const char *entry)
86198+{
86199+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
86200+ return;
86201+}
86202diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
86203new file mode 100644
86204index 0000000..6f9eb73
86205--- /dev/null
86206+++ b/grsecurity/grsec_mount.c
86207@@ -0,0 +1,65 @@
86208+#include <linux/kernel.h>
86209+#include <linux/sched.h>
86210+#include <linux/mount.h>
86211+#include <linux/major.h>
86212+#include <linux/grsecurity.h>
86213+#include <linux/grinternal.h>
86214+
86215+void
86216+gr_log_remount(const char *devname, const int retval)
86217+{
86218+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
86219+ if (grsec_enable_mount && (retval >= 0))
86220+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
86221+#endif
86222+ return;
86223+}
86224+
86225+void
86226+gr_log_unmount(const char *devname, const int retval)
86227+{
86228+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
86229+ if (grsec_enable_mount && (retval >= 0))
86230+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
86231+#endif
86232+ return;
86233+}
86234+
86235+void
86236+gr_log_mount(const char *from, struct path *to, const int retval)
86237+{
86238+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
86239+ if (grsec_enable_mount && (retval >= 0))
86240+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
86241+#endif
86242+ return;
86243+}
86244+
86245+int
86246+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
86247+{
86248+#ifdef CONFIG_GRKERNSEC_ROFS
86249+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
86250+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
86251+ return -EPERM;
86252+ } else
86253+ return 0;
86254+#endif
86255+ return 0;
86256+}
86257+
86258+int
86259+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
86260+{
86261+#ifdef CONFIG_GRKERNSEC_ROFS
86262+ struct inode *inode = dentry->d_inode;
86263+
86264+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
86265+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
86266+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
86267+ return -EPERM;
86268+ } else
86269+ return 0;
86270+#endif
86271+ return 0;
86272+}
86273diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
86274new file mode 100644
86275index 0000000..6ee9d50
86276--- /dev/null
86277+++ b/grsecurity/grsec_pax.c
86278@@ -0,0 +1,45 @@
86279+#include <linux/kernel.h>
86280+#include <linux/sched.h>
86281+#include <linux/mm.h>
86282+#include <linux/file.h>
86283+#include <linux/grinternal.h>
86284+#include <linux/grsecurity.h>
86285+
86286+void
86287+gr_log_textrel(struct vm_area_struct * vma)
86288+{
86289+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86290+ if (grsec_enable_log_rwxmaps)
86291+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
86292+#endif
86293+ return;
86294+}
86295+
86296+void gr_log_ptgnustack(struct file *file)
86297+{
86298+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86299+ if (grsec_enable_log_rwxmaps)
86300+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
86301+#endif
86302+ return;
86303+}
86304+
86305+void
86306+gr_log_rwxmmap(struct file *file)
86307+{
86308+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86309+ if (grsec_enable_log_rwxmaps)
86310+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
86311+#endif
86312+ return;
86313+}
86314+
86315+void
86316+gr_log_rwxmprotect(struct vm_area_struct *vma)
86317+{
86318+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86319+ if (grsec_enable_log_rwxmaps)
86320+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
86321+#endif
86322+ return;
86323+}
86324diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
86325new file mode 100644
86326index 0000000..2005a3a
86327--- /dev/null
86328+++ b/grsecurity/grsec_proc.c
86329@@ -0,0 +1,20 @@
86330+#include <linux/kernel.h>
86331+#include <linux/sched.h>
86332+#include <linux/grsecurity.h>
86333+#include <linux/grinternal.h>
86334+
86335+int gr_proc_is_restricted(void)
86336+{
86337+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86338+ const struct cred *cred = current_cred();
86339+#endif
86340+
86341+#ifdef CONFIG_GRKERNSEC_PROC_USER
86342+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
86343+ return -EACCES;
86344+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86345+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
86346+ return -EACCES;
86347+#endif
86348+ return 0;
86349+}
86350diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
86351new file mode 100644
86352index 0000000..f7f29aa
86353--- /dev/null
86354+++ b/grsecurity/grsec_ptrace.c
86355@@ -0,0 +1,30 @@
86356+#include <linux/kernel.h>
86357+#include <linux/sched.h>
86358+#include <linux/grinternal.h>
86359+#include <linux/security.h>
86360+
86361+void
86362+gr_audit_ptrace(struct task_struct *task)
86363+{
86364+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
86365+ if (grsec_enable_audit_ptrace)
86366+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
86367+#endif
86368+ return;
86369+}
86370+
86371+int
86372+gr_ptrace_readexec(struct file *file, int unsafe_flags)
86373+{
86374+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
86375+ const struct dentry *dentry = file->f_path.dentry;
86376+ const struct vfsmount *mnt = file->f_path.mnt;
86377+
86378+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
86379+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
86380+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
86381+ return -EACCES;
86382+ }
86383+#endif
86384+ return 0;
86385+}
86386diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
86387new file mode 100644
86388index 0000000..3860c7e
86389--- /dev/null
86390+++ b/grsecurity/grsec_sig.c
86391@@ -0,0 +1,236 @@
86392+#include <linux/kernel.h>
86393+#include <linux/sched.h>
86394+#include <linux/fs.h>
86395+#include <linux/delay.h>
86396+#include <linux/grsecurity.h>
86397+#include <linux/grinternal.h>
86398+#include <linux/hardirq.h>
86399+
86400+char *signames[] = {
86401+ [SIGSEGV] = "Segmentation fault",
86402+ [SIGILL] = "Illegal instruction",
86403+ [SIGABRT] = "Abort",
86404+ [SIGBUS] = "Invalid alignment/Bus error"
86405+};
86406+
86407+void
86408+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
86409+{
86410+#ifdef CONFIG_GRKERNSEC_SIGNAL
86411+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
86412+ (sig == SIGABRT) || (sig == SIGBUS))) {
86413+ if (task_pid_nr(t) == task_pid_nr(current)) {
86414+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
86415+ } else {
86416+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
86417+ }
86418+ }
86419+#endif
86420+ return;
86421+}
86422+
86423+int
86424+gr_handle_signal(const struct task_struct *p, const int sig)
86425+{
86426+#ifdef CONFIG_GRKERNSEC
86427+ /* ignore the 0 signal for protected task checks */
86428+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
86429+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
86430+ return -EPERM;
86431+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
86432+ return -EPERM;
86433+ }
86434+#endif
86435+ return 0;
86436+}
86437+
86438+#ifdef CONFIG_GRKERNSEC
86439+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
86440+
86441+int gr_fake_force_sig(int sig, struct task_struct *t)
86442+{
86443+ unsigned long int flags;
86444+ int ret, blocked, ignored;
86445+ struct k_sigaction *action;
86446+
86447+ spin_lock_irqsave(&t->sighand->siglock, flags);
86448+ action = &t->sighand->action[sig-1];
86449+ ignored = action->sa.sa_handler == SIG_IGN;
86450+ blocked = sigismember(&t->blocked, sig);
86451+ if (blocked || ignored) {
86452+ action->sa.sa_handler = SIG_DFL;
86453+ if (blocked) {
86454+ sigdelset(&t->blocked, sig);
86455+ recalc_sigpending_and_wake(t);
86456+ }
86457+ }
86458+ if (action->sa.sa_handler == SIG_DFL)
86459+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
86460+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
86461+
86462+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
86463+
86464+ return ret;
86465+}
86466+#endif
86467+
86468+#define GR_USER_BAN_TIME (15 * 60)
86469+#define GR_DAEMON_BRUTE_TIME (30 * 60)
86470+
86471+void gr_handle_brute_attach(int dumpable)
86472+{
86473+#ifdef CONFIG_GRKERNSEC_BRUTE
86474+ struct task_struct *p = current;
86475+ kuid_t uid = GLOBAL_ROOT_UID;
86476+ int daemon = 0;
86477+
86478+ if (!grsec_enable_brute)
86479+ return;
86480+
86481+ rcu_read_lock();
86482+ read_lock(&tasklist_lock);
86483+ read_lock(&grsec_exec_file_lock);
86484+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
86485+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
86486+ p->real_parent->brute = 1;
86487+ daemon = 1;
86488+ } else {
86489+ const struct cred *cred = __task_cred(p), *cred2;
86490+ struct task_struct *tsk, *tsk2;
86491+
86492+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
86493+ struct user_struct *user;
86494+
86495+ uid = cred->uid;
86496+
86497+ /* this is put upon execution past expiration */
86498+ user = find_user(uid);
86499+ if (user == NULL)
86500+ goto unlock;
86501+ user->suid_banned = 1;
86502+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
86503+ if (user->suid_ban_expires == ~0UL)
86504+ user->suid_ban_expires--;
86505+
86506+ /* only kill other threads of the same binary, from the same user */
86507+ do_each_thread(tsk2, tsk) {
86508+ cred2 = __task_cred(tsk);
86509+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
86510+ gr_fake_force_sig(SIGKILL, tsk);
86511+ } while_each_thread(tsk2, tsk);
86512+ }
86513+ }
86514+unlock:
86515+ read_unlock(&grsec_exec_file_lock);
86516+ read_unlock(&tasklist_lock);
86517+ rcu_read_unlock();
86518+
86519+ if (gr_is_global_nonroot(uid))
86520+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
86521+ else if (daemon)
86522+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
86523+
86524+#endif
86525+ return;
86526+}
86527+
86528+void gr_handle_brute_check(void)
86529+{
86530+#ifdef CONFIG_GRKERNSEC_BRUTE
86531+ struct task_struct *p = current;
86532+
86533+ if (unlikely(p->brute)) {
86534+ if (!grsec_enable_brute)
86535+ p->brute = 0;
86536+ else if (time_before(get_seconds(), p->brute_expires))
86537+ msleep(30 * 1000);
86538+ }
86539+#endif
86540+ return;
86541+}
86542+
86543+void gr_handle_kernel_exploit(void)
86544+{
86545+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86546+ const struct cred *cred;
86547+ struct task_struct *tsk, *tsk2;
86548+ struct user_struct *user;
86549+ kuid_t uid;
86550+
86551+ if (in_irq() || in_serving_softirq() || in_nmi())
86552+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
86553+
86554+ uid = current_uid();
86555+
86556+ if (gr_is_global_root(uid))
86557+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
86558+ else {
86559+ /* kill all the processes of this user, hold a reference
86560+ to their creds struct, and prevent them from creating
86561+ another process until system reset
86562+ */
86563+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
86564+ GR_GLOBAL_UID(uid));
86565+ /* we intentionally leak this ref */
86566+ user = get_uid(current->cred->user);
86567+ if (user)
86568+ user->kernel_banned = 1;
86569+
86570+ /* kill all processes of this user */
86571+ read_lock(&tasklist_lock);
86572+ do_each_thread(tsk2, tsk) {
86573+ cred = __task_cred(tsk);
86574+ if (uid_eq(cred->uid, uid))
86575+ gr_fake_force_sig(SIGKILL, tsk);
86576+ } while_each_thread(tsk2, tsk);
86577+ read_unlock(&tasklist_lock);
86578+ }
86579+#endif
86580+}
86581+
86582+#ifdef CONFIG_GRKERNSEC_BRUTE
86583+static bool suid_ban_expired(struct user_struct *user)
86584+{
86585+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
86586+ user->suid_banned = 0;
86587+ user->suid_ban_expires = 0;
86588+ free_uid(user);
86589+ return true;
86590+ }
86591+
86592+ return false;
86593+}
86594+#endif
86595+
86596+int gr_process_kernel_exec_ban(void)
86597+{
86598+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86599+ if (unlikely(current->cred->user->kernel_banned))
86600+ return -EPERM;
86601+#endif
86602+ return 0;
86603+}
86604+
86605+int gr_process_kernel_setuid_ban(struct user_struct *user)
86606+{
86607+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86608+ if (unlikely(user->kernel_banned))
86609+ gr_fake_force_sig(SIGKILL, current);
86610+#endif
86611+ return 0;
86612+}
86613+
86614+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
86615+{
86616+#ifdef CONFIG_GRKERNSEC_BRUTE
86617+ struct user_struct *user = current->cred->user;
86618+ if (unlikely(user->suid_banned)) {
86619+ if (suid_ban_expired(user))
86620+ return 0;
86621+ /* disallow execution of suid binaries only */
86622+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
86623+ return -EPERM;
86624+ }
86625+#endif
86626+ return 0;
86627+}
86628diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
86629new file mode 100644
86630index 0000000..a523bd2
86631--- /dev/null
86632+++ b/grsecurity/grsec_sock.c
86633@@ -0,0 +1,244 @@
86634+#include <linux/kernel.h>
86635+#include <linux/module.h>
86636+#include <linux/sched.h>
86637+#include <linux/file.h>
86638+#include <linux/net.h>
86639+#include <linux/in.h>
86640+#include <linux/ip.h>
86641+#include <net/sock.h>
86642+#include <net/inet_sock.h>
86643+#include <linux/grsecurity.h>
86644+#include <linux/grinternal.h>
86645+#include <linux/gracl.h>
86646+
86647+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
86648+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
86649+
86650+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
86651+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
86652+
86653+#ifdef CONFIG_UNIX_MODULE
86654+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
86655+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
86656+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
86657+EXPORT_SYMBOL_GPL(gr_handle_create);
86658+#endif
86659+
86660+#ifdef CONFIG_GRKERNSEC
86661+#define gr_conn_table_size 32749
86662+struct conn_table_entry {
86663+ struct conn_table_entry *next;
86664+ struct signal_struct *sig;
86665+};
86666+
86667+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
86668+DEFINE_SPINLOCK(gr_conn_table_lock);
86669+
86670+extern const char * gr_socktype_to_name(unsigned char type);
86671+extern const char * gr_proto_to_name(unsigned char proto);
86672+extern const char * gr_sockfamily_to_name(unsigned char family);
86673+
86674+static int
86675+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
86676+{
86677+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
86678+}
86679+
86680+static int
86681+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
86682+ __u16 sport, __u16 dport)
86683+{
86684+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
86685+ sig->gr_sport == sport && sig->gr_dport == dport))
86686+ return 1;
86687+ else
86688+ return 0;
86689+}
86690+
86691+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
86692+{
86693+ struct conn_table_entry **match;
86694+ unsigned int index;
86695+
86696+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
86697+ sig->gr_sport, sig->gr_dport,
86698+ gr_conn_table_size);
86699+
86700+ newent->sig = sig;
86701+
86702+ match = &gr_conn_table[index];
86703+ newent->next = *match;
86704+ *match = newent;
86705+
86706+ return;
86707+}
86708+
86709+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
86710+{
86711+ struct conn_table_entry *match, *last = NULL;
86712+ unsigned int index;
86713+
86714+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
86715+ sig->gr_sport, sig->gr_dport,
86716+ gr_conn_table_size);
86717+
86718+ match = gr_conn_table[index];
86719+ while (match && !conn_match(match->sig,
86720+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
86721+ sig->gr_dport)) {
86722+ last = match;
86723+ match = match->next;
86724+ }
86725+
86726+ if (match) {
86727+ if (last)
86728+ last->next = match->next;
86729+ else
86730+ gr_conn_table[index] = NULL;
86731+ kfree(match);
86732+ }
86733+
86734+ return;
86735+}
86736+
86737+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
86738+ __u16 sport, __u16 dport)
86739+{
86740+ struct conn_table_entry *match;
86741+ unsigned int index;
86742+
86743+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
86744+
86745+ match = gr_conn_table[index];
86746+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
86747+ match = match->next;
86748+
86749+ if (match)
86750+ return match->sig;
86751+ else
86752+ return NULL;
86753+}
86754+
86755+#endif
86756+
86757+void gr_update_task_in_ip_table(const struct inet_sock *inet)
86758+{
86759+#ifdef CONFIG_GRKERNSEC
86760+ struct signal_struct *sig = current->signal;
86761+ struct conn_table_entry *newent;
86762+
86763+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
86764+ if (newent == NULL)
86765+ return;
86766+ /* no bh lock needed since we are called with bh disabled */
86767+ spin_lock(&gr_conn_table_lock);
86768+ gr_del_task_from_ip_table_nolock(sig);
86769+ sig->gr_saddr = inet->inet_rcv_saddr;
86770+ sig->gr_daddr = inet->inet_daddr;
86771+ sig->gr_sport = inet->inet_sport;
86772+ sig->gr_dport = inet->inet_dport;
86773+ gr_add_to_task_ip_table_nolock(sig, newent);
86774+ spin_unlock(&gr_conn_table_lock);
86775+#endif
86776+ return;
86777+}
86778+
86779+void gr_del_task_from_ip_table(struct task_struct *task)
86780+{
86781+#ifdef CONFIG_GRKERNSEC
86782+ spin_lock_bh(&gr_conn_table_lock);
86783+ gr_del_task_from_ip_table_nolock(task->signal);
86784+ spin_unlock_bh(&gr_conn_table_lock);
86785+#endif
86786+ return;
86787+}
86788+
86789+void
86790+gr_attach_curr_ip(const struct sock *sk)
86791+{
86792+#ifdef CONFIG_GRKERNSEC
86793+ struct signal_struct *p, *set;
86794+ const struct inet_sock *inet = inet_sk(sk);
86795+
86796+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
86797+ return;
86798+
86799+ set = current->signal;
86800+
86801+ spin_lock_bh(&gr_conn_table_lock);
86802+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
86803+ inet->inet_dport, inet->inet_sport);
86804+ if (unlikely(p != NULL)) {
86805+ set->curr_ip = p->curr_ip;
86806+ set->used_accept = 1;
86807+ gr_del_task_from_ip_table_nolock(p);
86808+ spin_unlock_bh(&gr_conn_table_lock);
86809+ return;
86810+ }
86811+ spin_unlock_bh(&gr_conn_table_lock);
86812+
86813+ set->curr_ip = inet->inet_daddr;
86814+ set->used_accept = 1;
86815+#endif
86816+ return;
86817+}
86818+
86819+int
86820+gr_handle_sock_all(const int family, const int type, const int protocol)
86821+{
86822+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
86823+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
86824+ (family != AF_UNIX)) {
86825+ if (family == AF_INET)
86826+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
86827+ else
86828+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
86829+ return -EACCES;
86830+ }
86831+#endif
86832+ return 0;
86833+}
86834+
86835+int
86836+gr_handle_sock_server(const struct sockaddr *sck)
86837+{
86838+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
86839+ if (grsec_enable_socket_server &&
86840+ in_group_p(grsec_socket_server_gid) &&
86841+ sck && (sck->sa_family != AF_UNIX) &&
86842+ (sck->sa_family != AF_LOCAL)) {
86843+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
86844+ return -EACCES;
86845+ }
86846+#endif
86847+ return 0;
86848+}
86849+
86850+int
86851+gr_handle_sock_server_other(const struct sock *sck)
86852+{
86853+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
86854+ if (grsec_enable_socket_server &&
86855+ in_group_p(grsec_socket_server_gid) &&
86856+ sck && (sck->sk_family != AF_UNIX) &&
86857+ (sck->sk_family != AF_LOCAL)) {
86858+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
86859+ return -EACCES;
86860+ }
86861+#endif
86862+ return 0;
86863+}
86864+
86865+int
86866+gr_handle_sock_client(const struct sockaddr *sck)
86867+{
86868+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
86869+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
86870+ sck && (sck->sa_family != AF_UNIX) &&
86871+ (sck->sa_family != AF_LOCAL)) {
86872+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
86873+ return -EACCES;
86874+ }
86875+#endif
86876+ return 0;
86877+}
86878diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
86879new file mode 100644
86880index 0000000..cce889e
86881--- /dev/null
86882+++ b/grsecurity/grsec_sysctl.c
86883@@ -0,0 +1,488 @@
86884+#include <linux/kernel.h>
86885+#include <linux/sched.h>
86886+#include <linux/sysctl.h>
86887+#include <linux/grsecurity.h>
86888+#include <linux/grinternal.h>
86889+
86890+int
86891+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
86892+{
86893+#ifdef CONFIG_GRKERNSEC_SYSCTL
86894+ if (dirname == NULL || name == NULL)
86895+ return 0;
86896+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
86897+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
86898+ return -EACCES;
86899+ }
86900+#endif
86901+ return 0;
86902+}
86903+
86904+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
86905+static int __maybe_unused __read_only one = 1;
86906+#endif
86907+
86908+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
86909+ defined(CONFIG_GRKERNSEC_DENYUSB)
86910+struct ctl_table grsecurity_table[] = {
86911+#ifdef CONFIG_GRKERNSEC_SYSCTL
86912+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
86913+#ifdef CONFIG_GRKERNSEC_IO
86914+ {
86915+ .procname = "disable_priv_io",
86916+ .data = &grsec_disable_privio,
86917+ .maxlen = sizeof(int),
86918+ .mode = 0600,
86919+ .proc_handler = &proc_dointvec,
86920+ },
86921+#endif
86922+#endif
86923+#ifdef CONFIG_GRKERNSEC_LINK
86924+ {
86925+ .procname = "linking_restrictions",
86926+ .data = &grsec_enable_link,
86927+ .maxlen = sizeof(int),
86928+ .mode = 0600,
86929+ .proc_handler = &proc_dointvec,
86930+ },
86931+#endif
86932+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
86933+ {
86934+ .procname = "enforce_symlinksifowner",
86935+ .data = &grsec_enable_symlinkown,
86936+ .maxlen = sizeof(int),
86937+ .mode = 0600,
86938+ .proc_handler = &proc_dointvec,
86939+ },
86940+ {
86941+ .procname = "symlinkown_gid",
86942+ .data = &grsec_symlinkown_gid,
86943+ .maxlen = sizeof(int),
86944+ .mode = 0600,
86945+ .proc_handler = &proc_dointvec,
86946+ },
86947+#endif
86948+#ifdef CONFIG_GRKERNSEC_BRUTE
86949+ {
86950+ .procname = "deter_bruteforce",
86951+ .data = &grsec_enable_brute,
86952+ .maxlen = sizeof(int),
86953+ .mode = 0600,
86954+ .proc_handler = &proc_dointvec,
86955+ },
86956+#endif
86957+#ifdef CONFIG_GRKERNSEC_FIFO
86958+ {
86959+ .procname = "fifo_restrictions",
86960+ .data = &grsec_enable_fifo,
86961+ .maxlen = sizeof(int),
86962+ .mode = 0600,
86963+ .proc_handler = &proc_dointvec,
86964+ },
86965+#endif
86966+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
86967+ {
86968+ .procname = "ptrace_readexec",
86969+ .data = &grsec_enable_ptrace_readexec,
86970+ .maxlen = sizeof(int),
86971+ .mode = 0600,
86972+ .proc_handler = &proc_dointvec,
86973+ },
86974+#endif
86975+#ifdef CONFIG_GRKERNSEC_SETXID
86976+ {
86977+ .procname = "consistent_setxid",
86978+ .data = &grsec_enable_setxid,
86979+ .maxlen = sizeof(int),
86980+ .mode = 0600,
86981+ .proc_handler = &proc_dointvec,
86982+ },
86983+#endif
86984+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86985+ {
86986+ .procname = "ip_blackhole",
86987+ .data = &grsec_enable_blackhole,
86988+ .maxlen = sizeof(int),
86989+ .mode = 0600,
86990+ .proc_handler = &proc_dointvec,
86991+ },
86992+ {
86993+ .procname = "lastack_retries",
86994+ .data = &grsec_lastack_retries,
86995+ .maxlen = sizeof(int),
86996+ .mode = 0600,
86997+ .proc_handler = &proc_dointvec,
86998+ },
86999+#endif
87000+#ifdef CONFIG_GRKERNSEC_EXECLOG
87001+ {
87002+ .procname = "exec_logging",
87003+ .data = &grsec_enable_execlog,
87004+ .maxlen = sizeof(int),
87005+ .mode = 0600,
87006+ .proc_handler = &proc_dointvec,
87007+ },
87008+#endif
87009+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
87010+ {
87011+ .procname = "rwxmap_logging",
87012+ .data = &grsec_enable_log_rwxmaps,
87013+ .maxlen = sizeof(int),
87014+ .mode = 0600,
87015+ .proc_handler = &proc_dointvec,
87016+ },
87017+#endif
87018+#ifdef CONFIG_GRKERNSEC_SIGNAL
87019+ {
87020+ .procname = "signal_logging",
87021+ .data = &grsec_enable_signal,
87022+ .maxlen = sizeof(int),
87023+ .mode = 0600,
87024+ .proc_handler = &proc_dointvec,
87025+ },
87026+#endif
87027+#ifdef CONFIG_GRKERNSEC_FORKFAIL
87028+ {
87029+ .procname = "forkfail_logging",
87030+ .data = &grsec_enable_forkfail,
87031+ .maxlen = sizeof(int),
87032+ .mode = 0600,
87033+ .proc_handler = &proc_dointvec,
87034+ },
87035+#endif
87036+#ifdef CONFIG_GRKERNSEC_TIME
87037+ {
87038+ .procname = "timechange_logging",
87039+ .data = &grsec_enable_time,
87040+ .maxlen = sizeof(int),
87041+ .mode = 0600,
87042+ .proc_handler = &proc_dointvec,
87043+ },
87044+#endif
87045+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
87046+ {
87047+ .procname = "chroot_deny_shmat",
87048+ .data = &grsec_enable_chroot_shmat,
87049+ .maxlen = sizeof(int),
87050+ .mode = 0600,
87051+ .proc_handler = &proc_dointvec,
87052+ },
87053+#endif
87054+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
87055+ {
87056+ .procname = "chroot_deny_unix",
87057+ .data = &grsec_enable_chroot_unix,
87058+ .maxlen = sizeof(int),
87059+ .mode = 0600,
87060+ .proc_handler = &proc_dointvec,
87061+ },
87062+#endif
87063+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
87064+ {
87065+ .procname = "chroot_deny_mount",
87066+ .data = &grsec_enable_chroot_mount,
87067+ .maxlen = sizeof(int),
87068+ .mode = 0600,
87069+ .proc_handler = &proc_dointvec,
87070+ },
87071+#endif
87072+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
87073+ {
87074+ .procname = "chroot_deny_fchdir",
87075+ .data = &grsec_enable_chroot_fchdir,
87076+ .maxlen = sizeof(int),
87077+ .mode = 0600,
87078+ .proc_handler = &proc_dointvec,
87079+ },
87080+#endif
87081+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
87082+ {
87083+ .procname = "chroot_deny_chroot",
87084+ .data = &grsec_enable_chroot_double,
87085+ .maxlen = sizeof(int),
87086+ .mode = 0600,
87087+ .proc_handler = &proc_dointvec,
87088+ },
87089+#endif
87090+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
87091+ {
87092+ .procname = "chroot_deny_pivot",
87093+ .data = &grsec_enable_chroot_pivot,
87094+ .maxlen = sizeof(int),
87095+ .mode = 0600,
87096+ .proc_handler = &proc_dointvec,
87097+ },
87098+#endif
87099+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
87100+ {
87101+ .procname = "chroot_enforce_chdir",
87102+ .data = &grsec_enable_chroot_chdir,
87103+ .maxlen = sizeof(int),
87104+ .mode = 0600,
87105+ .proc_handler = &proc_dointvec,
87106+ },
87107+#endif
87108+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
87109+ {
87110+ .procname = "chroot_deny_chmod",
87111+ .data = &grsec_enable_chroot_chmod,
87112+ .maxlen = sizeof(int),
87113+ .mode = 0600,
87114+ .proc_handler = &proc_dointvec,
87115+ },
87116+#endif
87117+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
87118+ {
87119+ .procname = "chroot_deny_mknod",
87120+ .data = &grsec_enable_chroot_mknod,
87121+ .maxlen = sizeof(int),
87122+ .mode = 0600,
87123+ .proc_handler = &proc_dointvec,
87124+ },
87125+#endif
87126+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
87127+ {
87128+ .procname = "chroot_restrict_nice",
87129+ .data = &grsec_enable_chroot_nice,
87130+ .maxlen = sizeof(int),
87131+ .mode = 0600,
87132+ .proc_handler = &proc_dointvec,
87133+ },
87134+#endif
87135+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
87136+ {
87137+ .procname = "chroot_execlog",
87138+ .data = &grsec_enable_chroot_execlog,
87139+ .maxlen = sizeof(int),
87140+ .mode = 0600,
87141+ .proc_handler = &proc_dointvec,
87142+ },
87143+#endif
87144+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
87145+ {
87146+ .procname = "chroot_caps",
87147+ .data = &grsec_enable_chroot_caps,
87148+ .maxlen = sizeof(int),
87149+ .mode = 0600,
87150+ .proc_handler = &proc_dointvec,
87151+ },
87152+#endif
87153+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
87154+ {
87155+ .procname = "chroot_deny_bad_rename",
87156+ .data = &grsec_enable_chroot_rename,
87157+ .maxlen = sizeof(int),
87158+ .mode = 0600,
87159+ .proc_handler = &proc_dointvec,
87160+ },
87161+#endif
87162+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
87163+ {
87164+ .procname = "chroot_deny_sysctl",
87165+ .data = &grsec_enable_chroot_sysctl,
87166+ .maxlen = sizeof(int),
87167+ .mode = 0600,
87168+ .proc_handler = &proc_dointvec,
87169+ },
87170+#endif
87171+#ifdef CONFIG_GRKERNSEC_TPE
87172+ {
87173+ .procname = "tpe",
87174+ .data = &grsec_enable_tpe,
87175+ .maxlen = sizeof(int),
87176+ .mode = 0600,
87177+ .proc_handler = &proc_dointvec,
87178+ },
87179+ {
87180+ .procname = "tpe_gid",
87181+ .data = &grsec_tpe_gid,
87182+ .maxlen = sizeof(int),
87183+ .mode = 0600,
87184+ .proc_handler = &proc_dointvec,
87185+ },
87186+#endif
87187+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
87188+ {
87189+ .procname = "tpe_invert",
87190+ .data = &grsec_enable_tpe_invert,
87191+ .maxlen = sizeof(int),
87192+ .mode = 0600,
87193+ .proc_handler = &proc_dointvec,
87194+ },
87195+#endif
87196+#ifdef CONFIG_GRKERNSEC_TPE_ALL
87197+ {
87198+ .procname = "tpe_restrict_all",
87199+ .data = &grsec_enable_tpe_all,
87200+ .maxlen = sizeof(int),
87201+ .mode = 0600,
87202+ .proc_handler = &proc_dointvec,
87203+ },
87204+#endif
87205+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
87206+ {
87207+ .procname = "socket_all",
87208+ .data = &grsec_enable_socket_all,
87209+ .maxlen = sizeof(int),
87210+ .mode = 0600,
87211+ .proc_handler = &proc_dointvec,
87212+ },
87213+ {
87214+ .procname = "socket_all_gid",
87215+ .data = &grsec_socket_all_gid,
87216+ .maxlen = sizeof(int),
87217+ .mode = 0600,
87218+ .proc_handler = &proc_dointvec,
87219+ },
87220+#endif
87221+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
87222+ {
87223+ .procname = "socket_client",
87224+ .data = &grsec_enable_socket_client,
87225+ .maxlen = sizeof(int),
87226+ .mode = 0600,
87227+ .proc_handler = &proc_dointvec,
87228+ },
87229+ {
87230+ .procname = "socket_client_gid",
87231+ .data = &grsec_socket_client_gid,
87232+ .maxlen = sizeof(int),
87233+ .mode = 0600,
87234+ .proc_handler = &proc_dointvec,
87235+ },
87236+#endif
87237+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
87238+ {
87239+ .procname = "socket_server",
87240+ .data = &grsec_enable_socket_server,
87241+ .maxlen = sizeof(int),
87242+ .mode = 0600,
87243+ .proc_handler = &proc_dointvec,
87244+ },
87245+ {
87246+ .procname = "socket_server_gid",
87247+ .data = &grsec_socket_server_gid,
87248+ .maxlen = sizeof(int),
87249+ .mode = 0600,
87250+ .proc_handler = &proc_dointvec,
87251+ },
87252+#endif
87253+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
87254+ {
87255+ .procname = "audit_group",
87256+ .data = &grsec_enable_group,
87257+ .maxlen = sizeof(int),
87258+ .mode = 0600,
87259+ .proc_handler = &proc_dointvec,
87260+ },
87261+ {
87262+ .procname = "audit_gid",
87263+ .data = &grsec_audit_gid,
87264+ .maxlen = sizeof(int),
87265+ .mode = 0600,
87266+ .proc_handler = &proc_dointvec,
87267+ },
87268+#endif
87269+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
87270+ {
87271+ .procname = "audit_chdir",
87272+ .data = &grsec_enable_chdir,
87273+ .maxlen = sizeof(int),
87274+ .mode = 0600,
87275+ .proc_handler = &proc_dointvec,
87276+ },
87277+#endif
87278+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
87279+ {
87280+ .procname = "audit_mount",
87281+ .data = &grsec_enable_mount,
87282+ .maxlen = sizeof(int),
87283+ .mode = 0600,
87284+ .proc_handler = &proc_dointvec,
87285+ },
87286+#endif
87287+#ifdef CONFIG_GRKERNSEC_DMESG
87288+ {
87289+ .procname = "dmesg",
87290+ .data = &grsec_enable_dmesg,
87291+ .maxlen = sizeof(int),
87292+ .mode = 0600,
87293+ .proc_handler = &proc_dointvec,
87294+ },
87295+#endif
87296+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
87297+ {
87298+ .procname = "chroot_findtask",
87299+ .data = &grsec_enable_chroot_findtask,
87300+ .maxlen = sizeof(int),
87301+ .mode = 0600,
87302+ .proc_handler = &proc_dointvec,
87303+ },
87304+#endif
87305+#ifdef CONFIG_GRKERNSEC_RESLOG
87306+ {
87307+ .procname = "resource_logging",
87308+ .data = &grsec_resource_logging,
87309+ .maxlen = sizeof(int),
87310+ .mode = 0600,
87311+ .proc_handler = &proc_dointvec,
87312+ },
87313+#endif
87314+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
87315+ {
87316+ .procname = "audit_ptrace",
87317+ .data = &grsec_enable_audit_ptrace,
87318+ .maxlen = sizeof(int),
87319+ .mode = 0600,
87320+ .proc_handler = &proc_dointvec,
87321+ },
87322+#endif
87323+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
87324+ {
87325+ .procname = "harden_ptrace",
87326+ .data = &grsec_enable_harden_ptrace,
87327+ .maxlen = sizeof(int),
87328+ .mode = 0600,
87329+ .proc_handler = &proc_dointvec,
87330+ },
87331+#endif
87332+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
87333+ {
87334+ .procname = "harden_ipc",
87335+ .data = &grsec_enable_harden_ipc,
87336+ .maxlen = sizeof(int),
87337+ .mode = 0600,
87338+ .proc_handler = &proc_dointvec,
87339+ },
87340+#endif
87341+ {
87342+ .procname = "grsec_lock",
87343+ .data = &grsec_lock,
87344+ .maxlen = sizeof(int),
87345+ .mode = 0600,
87346+ .proc_handler = &proc_dointvec,
87347+ },
87348+#endif
87349+#ifdef CONFIG_GRKERNSEC_ROFS
87350+ {
87351+ .procname = "romount_protect",
87352+ .data = &grsec_enable_rofs,
87353+ .maxlen = sizeof(int),
87354+ .mode = 0600,
87355+ .proc_handler = &proc_dointvec_minmax,
87356+ .extra1 = &one,
87357+ .extra2 = &one,
87358+ },
87359+#endif
87360+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
87361+ {
87362+ .procname = "deny_new_usb",
87363+ .data = &grsec_deny_new_usb,
87364+ .maxlen = sizeof(int),
87365+ .mode = 0600,
87366+ .proc_handler = &proc_dointvec,
87367+ },
87368+#endif
87369+ { }
87370+};
87371+#endif
87372diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
87373new file mode 100644
87374index 0000000..61b514e
87375--- /dev/null
87376+++ b/grsecurity/grsec_time.c
87377@@ -0,0 +1,16 @@
87378+#include <linux/kernel.h>
87379+#include <linux/sched.h>
87380+#include <linux/grinternal.h>
87381+#include <linux/module.h>
87382+
87383+void
87384+gr_log_timechange(void)
87385+{
87386+#ifdef CONFIG_GRKERNSEC_TIME
87387+ if (grsec_enable_time)
87388+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
87389+#endif
87390+ return;
87391+}
87392+
87393+EXPORT_SYMBOL_GPL(gr_log_timechange);
87394diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
87395new file mode 100644
87396index 0000000..d1953de
87397--- /dev/null
87398+++ b/grsecurity/grsec_tpe.c
87399@@ -0,0 +1,78 @@
87400+#include <linux/kernel.h>
87401+#include <linux/sched.h>
87402+#include <linux/file.h>
87403+#include <linux/fs.h>
87404+#include <linux/grinternal.h>
87405+
87406+extern int gr_acl_tpe_check(void);
87407+
87408+int
87409+gr_tpe_allow(const struct file *file)
87410+{
87411+#ifdef CONFIG_GRKERNSEC
87412+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
87413+ struct inode *file_inode = file->f_path.dentry->d_inode;
87414+ const struct cred *cred = current_cred();
87415+ char *msg = NULL;
87416+ char *msg2 = NULL;
87417+
87418+ // never restrict root
87419+ if (gr_is_global_root(cred->uid))
87420+ return 1;
87421+
87422+ if (grsec_enable_tpe) {
87423+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
87424+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
87425+ msg = "not being in trusted group";
87426+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
87427+ msg = "being in untrusted group";
87428+#else
87429+ if (in_group_p(grsec_tpe_gid))
87430+ msg = "being in untrusted group";
87431+#endif
87432+ }
87433+ if (!msg && gr_acl_tpe_check())
87434+ msg = "being in untrusted role";
87435+
87436+ // not in any affected group/role
87437+ if (!msg)
87438+ goto next_check;
87439+
87440+ if (gr_is_global_nonroot(inode->i_uid))
87441+ msg2 = "file in non-root-owned directory";
87442+ else if (inode->i_mode & S_IWOTH)
87443+ msg2 = "file in world-writable directory";
87444+ else if (inode->i_mode & S_IWGRP)
87445+ msg2 = "file in group-writable directory";
87446+ else if (file_inode->i_mode & S_IWOTH)
87447+ msg2 = "file is world-writable";
87448+
87449+ if (msg && msg2) {
87450+ char fullmsg[70] = {0};
87451+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
87452+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
87453+ return 0;
87454+ }
87455+ msg = NULL;
87456+next_check:
87457+#ifdef CONFIG_GRKERNSEC_TPE_ALL
87458+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
87459+ return 1;
87460+
87461+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
87462+ msg = "directory not owned by user";
87463+ else if (inode->i_mode & S_IWOTH)
87464+ msg = "file in world-writable directory";
87465+ else if (inode->i_mode & S_IWGRP)
87466+ msg = "file in group-writable directory";
87467+ else if (file_inode->i_mode & S_IWOTH)
87468+ msg = "file is world-writable";
87469+
87470+ if (msg) {
87471+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
87472+ return 0;
87473+ }
87474+#endif
87475+#endif
87476+ return 1;
87477+}
87478diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
87479new file mode 100644
87480index 0000000..ae02d8e
87481--- /dev/null
87482+++ b/grsecurity/grsec_usb.c
87483@@ -0,0 +1,15 @@
87484+#include <linux/kernel.h>
87485+#include <linux/grinternal.h>
87486+#include <linux/module.h>
87487+
87488+int gr_handle_new_usb(void)
87489+{
87490+#ifdef CONFIG_GRKERNSEC_DENYUSB
87491+ if (grsec_deny_new_usb) {
87492+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
87493+ return 1;
87494+ }
87495+#endif
87496+ return 0;
87497+}
87498+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
87499diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
87500new file mode 100644
87501index 0000000..158b330
87502--- /dev/null
87503+++ b/grsecurity/grsum.c
87504@@ -0,0 +1,64 @@
87505+#include <linux/err.h>
87506+#include <linux/kernel.h>
87507+#include <linux/sched.h>
87508+#include <linux/mm.h>
87509+#include <linux/scatterlist.h>
87510+#include <linux/crypto.h>
87511+#include <linux/gracl.h>
87512+
87513+
87514+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
87515+#error "crypto and sha256 must be built into the kernel"
87516+#endif
87517+
87518+int
87519+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
87520+{
87521+ struct crypto_hash *tfm;
87522+ struct hash_desc desc;
87523+ struct scatterlist sg[2];
87524+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
87525+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
87526+ unsigned long *sumptr = (unsigned long *)sum;
87527+ int cryptres;
87528+ int retval = 1;
87529+ volatile int mismatched = 0;
87530+ volatile int dummy = 0;
87531+ unsigned int i;
87532+
87533+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
87534+ if (IS_ERR(tfm)) {
87535+ /* should never happen, since sha256 should be built in */
87536+ memset(entry->pw, 0, GR_PW_LEN);
87537+ return 1;
87538+ }
87539+
87540+ sg_init_table(sg, 2);
87541+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
87542+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
87543+
87544+ desc.tfm = tfm;
87545+ desc.flags = 0;
87546+
87547+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
87548+ temp_sum);
87549+
87550+ memset(entry->pw, 0, GR_PW_LEN);
87551+
87552+ if (cryptres)
87553+ goto out;
87554+
87555+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
87556+ if (sumptr[i] != tmpsumptr[i])
87557+ mismatched = 1;
87558+ else
87559+ dummy = 1; // waste a cycle
87560+
87561+ if (!mismatched)
87562+ retval = dummy - 1;
87563+
87564+out:
87565+ crypto_free_hash(tfm);
87566+
87567+ return retval;
87568+}
87569diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
87570index 5bdab6b..9ae82fe 100644
87571--- a/include/asm-generic/4level-fixup.h
87572+++ b/include/asm-generic/4level-fixup.h
87573@@ -14,8 +14,10 @@
87574 #define pmd_alloc(mm, pud, address) \
87575 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
87576 NULL: pmd_offset(pud, address))
87577+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
87578
87579 #define pud_alloc(mm, pgd, address) (pgd)
87580+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
87581 #define pud_offset(pgd, start) (pgd)
87582 #define pud_none(pud) 0
87583 #define pud_bad(pud) 0
87584diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
87585index b7babf0..1e4b4f1 100644
87586--- a/include/asm-generic/atomic-long.h
87587+++ b/include/asm-generic/atomic-long.h
87588@@ -22,6 +22,12 @@
87589
87590 typedef atomic64_t atomic_long_t;
87591
87592+#ifdef CONFIG_PAX_REFCOUNT
87593+typedef atomic64_unchecked_t atomic_long_unchecked_t;
87594+#else
87595+typedef atomic64_t atomic_long_unchecked_t;
87596+#endif
87597+
87598 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
87599
87600 static inline long atomic_long_read(atomic_long_t *l)
87601@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
87602 return (long)atomic64_read(v);
87603 }
87604
87605+#ifdef CONFIG_PAX_REFCOUNT
87606+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
87607+{
87608+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87609+
87610+ return (long)atomic64_read_unchecked(v);
87611+}
87612+#endif
87613+
87614 static inline void atomic_long_set(atomic_long_t *l, long i)
87615 {
87616 atomic64_t *v = (atomic64_t *)l;
87617@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
87618 atomic64_set(v, i);
87619 }
87620
87621+#ifdef CONFIG_PAX_REFCOUNT
87622+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
87623+{
87624+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87625+
87626+ atomic64_set_unchecked(v, i);
87627+}
87628+#endif
87629+
87630 static inline void atomic_long_inc(atomic_long_t *l)
87631 {
87632 atomic64_t *v = (atomic64_t *)l;
87633@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
87634 atomic64_inc(v);
87635 }
87636
87637+#ifdef CONFIG_PAX_REFCOUNT
87638+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
87639+{
87640+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87641+
87642+ atomic64_inc_unchecked(v);
87643+}
87644+#endif
87645+
87646 static inline void atomic_long_dec(atomic_long_t *l)
87647 {
87648 atomic64_t *v = (atomic64_t *)l;
87649@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
87650 atomic64_dec(v);
87651 }
87652
87653+#ifdef CONFIG_PAX_REFCOUNT
87654+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
87655+{
87656+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87657+
87658+ atomic64_dec_unchecked(v);
87659+}
87660+#endif
87661+
87662 static inline void atomic_long_add(long i, atomic_long_t *l)
87663 {
87664 atomic64_t *v = (atomic64_t *)l;
87665@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
87666 atomic64_add(i, v);
87667 }
87668
87669+#ifdef CONFIG_PAX_REFCOUNT
87670+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
87671+{
87672+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87673+
87674+ atomic64_add_unchecked(i, v);
87675+}
87676+#endif
87677+
87678 static inline void atomic_long_sub(long i, atomic_long_t *l)
87679 {
87680 atomic64_t *v = (atomic64_t *)l;
87681@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
87682 atomic64_sub(i, v);
87683 }
87684
87685+#ifdef CONFIG_PAX_REFCOUNT
87686+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
87687+{
87688+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87689+
87690+ atomic64_sub_unchecked(i, v);
87691+}
87692+#endif
87693+
87694 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
87695 {
87696 atomic64_t *v = (atomic64_t *)l;
87697@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
87698 return atomic64_add_negative(i, v);
87699 }
87700
87701-static inline long atomic_long_add_return(long i, atomic_long_t *l)
87702+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
87703 {
87704 atomic64_t *v = (atomic64_t *)l;
87705
87706 return (long)atomic64_add_return(i, v);
87707 }
87708
87709+#ifdef CONFIG_PAX_REFCOUNT
87710+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
87711+{
87712+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87713+
87714+ return (long)atomic64_add_return_unchecked(i, v);
87715+}
87716+#endif
87717+
87718 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
87719 {
87720 atomic64_t *v = (atomic64_t *)l;
87721@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
87722 return (long)atomic64_inc_return(v);
87723 }
87724
87725+#ifdef CONFIG_PAX_REFCOUNT
87726+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
87727+{
87728+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87729+
87730+ return (long)atomic64_inc_return_unchecked(v);
87731+}
87732+#endif
87733+
87734 static inline long atomic_long_dec_return(atomic_long_t *l)
87735 {
87736 atomic64_t *v = (atomic64_t *)l;
87737@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
87738
87739 typedef atomic_t atomic_long_t;
87740
87741+#ifdef CONFIG_PAX_REFCOUNT
87742+typedef atomic_unchecked_t atomic_long_unchecked_t;
87743+#else
87744+typedef atomic_t atomic_long_unchecked_t;
87745+#endif
87746+
87747 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
87748 static inline long atomic_long_read(atomic_long_t *l)
87749 {
87750@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
87751 return (long)atomic_read(v);
87752 }
87753
87754+#ifdef CONFIG_PAX_REFCOUNT
87755+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
87756+{
87757+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87758+
87759+ return (long)atomic_read_unchecked(v);
87760+}
87761+#endif
87762+
87763 static inline void atomic_long_set(atomic_long_t *l, long i)
87764 {
87765 atomic_t *v = (atomic_t *)l;
87766@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
87767 atomic_set(v, i);
87768 }
87769
87770+#ifdef CONFIG_PAX_REFCOUNT
87771+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
87772+{
87773+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87774+
87775+ atomic_set_unchecked(v, i);
87776+}
87777+#endif
87778+
87779 static inline void atomic_long_inc(atomic_long_t *l)
87780 {
87781 atomic_t *v = (atomic_t *)l;
87782@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
87783 atomic_inc(v);
87784 }
87785
87786+#ifdef CONFIG_PAX_REFCOUNT
87787+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
87788+{
87789+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87790+
87791+ atomic_inc_unchecked(v);
87792+}
87793+#endif
87794+
87795 static inline void atomic_long_dec(atomic_long_t *l)
87796 {
87797 atomic_t *v = (atomic_t *)l;
87798@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
87799 atomic_dec(v);
87800 }
87801
87802+#ifdef CONFIG_PAX_REFCOUNT
87803+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
87804+{
87805+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87806+
87807+ atomic_dec_unchecked(v);
87808+}
87809+#endif
87810+
87811 static inline void atomic_long_add(long i, atomic_long_t *l)
87812 {
87813 atomic_t *v = (atomic_t *)l;
87814@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
87815 atomic_add(i, v);
87816 }
87817
87818+#ifdef CONFIG_PAX_REFCOUNT
87819+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
87820+{
87821+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87822+
87823+ atomic_add_unchecked(i, v);
87824+}
87825+#endif
87826+
87827 static inline void atomic_long_sub(long i, atomic_long_t *l)
87828 {
87829 atomic_t *v = (atomic_t *)l;
87830@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
87831 atomic_sub(i, v);
87832 }
87833
87834+#ifdef CONFIG_PAX_REFCOUNT
87835+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
87836+{
87837+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87838+
87839+ atomic_sub_unchecked(i, v);
87840+}
87841+#endif
87842+
87843 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
87844 {
87845 atomic_t *v = (atomic_t *)l;
87846@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
87847 return atomic_add_negative(i, v);
87848 }
87849
87850-static inline long atomic_long_add_return(long i, atomic_long_t *l)
87851+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
87852 {
87853 atomic_t *v = (atomic_t *)l;
87854
87855 return (long)atomic_add_return(i, v);
87856 }
87857
87858+#ifdef CONFIG_PAX_REFCOUNT
87859+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
87860+{
87861+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87862+
87863+ return (long)atomic_add_return_unchecked(i, v);
87864+}
87865+
87866+#endif
87867+
87868 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
87869 {
87870 atomic_t *v = (atomic_t *)l;
87871@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
87872 return (long)atomic_inc_return(v);
87873 }
87874
87875+#ifdef CONFIG_PAX_REFCOUNT
87876+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
87877+{
87878+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87879+
87880+ return (long)atomic_inc_return_unchecked(v);
87881+}
87882+#endif
87883+
87884 static inline long atomic_long_dec_return(atomic_long_t *l)
87885 {
87886 atomic_t *v = (atomic_t *)l;
87887@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
87888
87889 #endif /* BITS_PER_LONG == 64 */
87890
87891+#ifdef CONFIG_PAX_REFCOUNT
87892+static inline void pax_refcount_needs_these_functions(void)
87893+{
87894+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
87895+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
87896+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
87897+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
87898+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
87899+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
87900+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
87901+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
87902+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
87903+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
87904+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
87905+#ifdef CONFIG_X86
87906+ atomic_clear_mask_unchecked(0, NULL);
87907+ atomic_set_mask_unchecked(0, NULL);
87908+#endif
87909+
87910+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
87911+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
87912+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
87913+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
87914+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
87915+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
87916+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
87917+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
87918+}
87919+#else
87920+#define atomic_read_unchecked(v) atomic_read(v)
87921+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
87922+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
87923+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
87924+#define atomic_inc_unchecked(v) atomic_inc(v)
87925+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
87926+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
87927+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
87928+#define atomic_dec_unchecked(v) atomic_dec(v)
87929+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
87930+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
87931+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
87932+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
87933+
87934+#define atomic_long_read_unchecked(v) atomic_long_read(v)
87935+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
87936+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
87937+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
87938+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
87939+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
87940+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
87941+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
87942+#endif
87943+
87944 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
87945diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
87946index 30ad9c8..c70c170 100644
87947--- a/include/asm-generic/atomic64.h
87948+++ b/include/asm-generic/atomic64.h
87949@@ -16,6 +16,8 @@ typedef struct {
87950 long long counter;
87951 } atomic64_t;
87952
87953+typedef atomic64_t atomic64_unchecked_t;
87954+
87955 #define ATOMIC64_INIT(i) { (i) }
87956
87957 extern long long atomic64_read(const atomic64_t *v);
87958@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
87959 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
87960 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
87961
87962+#define atomic64_read_unchecked(v) atomic64_read(v)
87963+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
87964+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
87965+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
87966+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
87967+#define atomic64_inc_unchecked(v) atomic64_inc(v)
87968+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
87969+#define atomic64_dec_unchecked(v) atomic64_dec(v)
87970+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
87971+
87972 #endif /* _ASM_GENERIC_ATOMIC64_H */
87973diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
87974index f5c40b0..e902f9d 100644
87975--- a/include/asm-generic/barrier.h
87976+++ b/include/asm-generic/barrier.h
87977@@ -82,7 +82,7 @@
87978 do { \
87979 compiletime_assert_atomic_type(*p); \
87980 smp_mb(); \
87981- ACCESS_ONCE(*p) = (v); \
87982+ ACCESS_ONCE_RW(*p) = (v); \
87983 } while (0)
87984
87985 #define smp_load_acquire(p) \
87986diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
87987index a60a7cc..0fe12f2 100644
87988--- a/include/asm-generic/bitops/__fls.h
87989+++ b/include/asm-generic/bitops/__fls.h
87990@@ -9,7 +9,7 @@
87991 *
87992 * Undefined if no set bit exists, so code should check against 0 first.
87993 */
87994-static __always_inline unsigned long __fls(unsigned long word)
87995+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
87996 {
87997 int num = BITS_PER_LONG - 1;
87998
87999diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
88000index 0576d1f..dad6c71 100644
88001--- a/include/asm-generic/bitops/fls.h
88002+++ b/include/asm-generic/bitops/fls.h
88003@@ -9,7 +9,7 @@
88004 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
88005 */
88006
88007-static __always_inline int fls(int x)
88008+static __always_inline int __intentional_overflow(-1) fls(int x)
88009 {
88010 int r = 32;
88011
88012diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
88013index b097cf8..3d40e14 100644
88014--- a/include/asm-generic/bitops/fls64.h
88015+++ b/include/asm-generic/bitops/fls64.h
88016@@ -15,7 +15,7 @@
88017 * at position 64.
88018 */
88019 #if BITS_PER_LONG == 32
88020-static __always_inline int fls64(__u64 x)
88021+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
88022 {
88023 __u32 h = x >> 32;
88024 if (h)
88025@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
88026 return fls(x);
88027 }
88028 #elif BITS_PER_LONG == 64
88029-static __always_inline int fls64(__u64 x)
88030+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
88031 {
88032 if (x == 0)
88033 return 0;
88034diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
88035index 630dd23..8c1dcb6b 100644
88036--- a/include/asm-generic/bug.h
88037+++ b/include/asm-generic/bug.h
88038@@ -62,13 +62,13 @@ struct bug_entry {
88039 * to provide better diagnostics.
88040 */
88041 #ifndef __WARN_TAINT
88042-extern __printf(3, 4)
88043+extern __printf(3, 4) __nocapture(1, 3, 4)
88044 void warn_slowpath_fmt(const char *file, const int line,
88045 const char *fmt, ...);
88046-extern __printf(4, 5)
88047+extern __printf(4, 5) __nocapture(1, 4, 5)
88048 void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
88049 const char *fmt, ...);
88050-extern void warn_slowpath_null(const char *file, const int line);
88051+extern __nocapture(1) void warn_slowpath_null(const char *file, const int line);
88052 #define WANT_WARN_ON_SLOWPATH
88053 #define __WARN() warn_slowpath_null(__FILE__, __LINE__)
88054 #define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
88055diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
88056index 1bfcfe5..e04c5c9 100644
88057--- a/include/asm-generic/cache.h
88058+++ b/include/asm-generic/cache.h
88059@@ -6,7 +6,7 @@
88060 * cache lines need to provide their own cache.h.
88061 */
88062
88063-#define L1_CACHE_SHIFT 5
88064-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
88065+#define L1_CACHE_SHIFT 5UL
88066+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
88067
88068 #endif /* __ASM_GENERIC_CACHE_H */
88069diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
88070index 0d68a1e..b74a761 100644
88071--- a/include/asm-generic/emergency-restart.h
88072+++ b/include/asm-generic/emergency-restart.h
88073@@ -1,7 +1,7 @@
88074 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
88075 #define _ASM_GENERIC_EMERGENCY_RESTART_H
88076
88077-static inline void machine_emergency_restart(void)
88078+static inline __noreturn void machine_emergency_restart(void)
88079 {
88080 machine_restart(NULL);
88081 }
88082diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
88083index 90f99c7..00ce236 100644
88084--- a/include/asm-generic/kmap_types.h
88085+++ b/include/asm-generic/kmap_types.h
88086@@ -2,9 +2,9 @@
88087 #define _ASM_GENERIC_KMAP_TYPES_H
88088
88089 #ifdef __WITH_KM_FENCE
88090-# define KM_TYPE_NR 41
88091+# define KM_TYPE_NR 42
88092 #else
88093-# define KM_TYPE_NR 20
88094+# define KM_TYPE_NR 21
88095 #endif
88096
88097 #endif
88098diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
88099index 9ceb03b..62b0b8f 100644
88100--- a/include/asm-generic/local.h
88101+++ b/include/asm-generic/local.h
88102@@ -23,24 +23,37 @@ typedef struct
88103 atomic_long_t a;
88104 } local_t;
88105
88106+typedef struct {
88107+ atomic_long_unchecked_t a;
88108+} local_unchecked_t;
88109+
88110 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
88111
88112 #define local_read(l) atomic_long_read(&(l)->a)
88113+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
88114 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
88115+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
88116 #define local_inc(l) atomic_long_inc(&(l)->a)
88117+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
88118 #define local_dec(l) atomic_long_dec(&(l)->a)
88119+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
88120 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
88121+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
88122 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
88123+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
88124
88125 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
88126 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
88127 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
88128 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
88129 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
88130+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
88131 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
88132 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
88133+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
88134
88135 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
88136+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
88137 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
88138 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
88139 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
88140diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
88141index 725612b..9cc513a 100644
88142--- a/include/asm-generic/pgtable-nopmd.h
88143+++ b/include/asm-generic/pgtable-nopmd.h
88144@@ -1,14 +1,19 @@
88145 #ifndef _PGTABLE_NOPMD_H
88146 #define _PGTABLE_NOPMD_H
88147
88148-#ifndef __ASSEMBLY__
88149-
88150 #include <asm-generic/pgtable-nopud.h>
88151
88152-struct mm_struct;
88153-
88154 #define __PAGETABLE_PMD_FOLDED
88155
88156+#define PMD_SHIFT PUD_SHIFT
88157+#define PTRS_PER_PMD 1
88158+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
88159+#define PMD_MASK (~(PMD_SIZE-1))
88160+
88161+#ifndef __ASSEMBLY__
88162+
88163+struct mm_struct;
88164+
88165 /*
88166 * Having the pmd type consist of a pud gets the size right, and allows
88167 * us to conceptually access the pud entry that this pmd is folded into
88168@@ -16,11 +21,6 @@ struct mm_struct;
88169 */
88170 typedef struct { pud_t pud; } pmd_t;
88171
88172-#define PMD_SHIFT PUD_SHIFT
88173-#define PTRS_PER_PMD 1
88174-#define PMD_SIZE (1UL << PMD_SHIFT)
88175-#define PMD_MASK (~(PMD_SIZE-1))
88176-
88177 /*
88178 * The "pud_xxx()" functions here are trivial for a folded two-level
88179 * setup: the pmd is never bad, and a pmd always exists (as it's folded
88180diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
88181index 810431d..0ec4804f 100644
88182--- a/include/asm-generic/pgtable-nopud.h
88183+++ b/include/asm-generic/pgtable-nopud.h
88184@@ -1,10 +1,15 @@
88185 #ifndef _PGTABLE_NOPUD_H
88186 #define _PGTABLE_NOPUD_H
88187
88188-#ifndef __ASSEMBLY__
88189-
88190 #define __PAGETABLE_PUD_FOLDED
88191
88192+#define PUD_SHIFT PGDIR_SHIFT
88193+#define PTRS_PER_PUD 1
88194+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
88195+#define PUD_MASK (~(PUD_SIZE-1))
88196+
88197+#ifndef __ASSEMBLY__
88198+
88199 /*
88200 * Having the pud type consist of a pgd gets the size right, and allows
88201 * us to conceptually access the pgd entry that this pud is folded into
88202@@ -12,11 +17,6 @@
88203 */
88204 typedef struct { pgd_t pgd; } pud_t;
88205
88206-#define PUD_SHIFT PGDIR_SHIFT
88207-#define PTRS_PER_PUD 1
88208-#define PUD_SIZE (1UL << PUD_SHIFT)
88209-#define PUD_MASK (~(PUD_SIZE-1))
88210-
88211 /*
88212 * The "pgd_xxx()" functions here are trivial for a folded two-level
88213 * setup: the pud is never bad, and a pud always exists (as it's folded
88214@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
88215 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
88216
88217 #define pgd_populate(mm, pgd, pud) do { } while (0)
88218+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
88219 /*
88220 * (puds are folded into pgds so this doesn't get actually called,
88221 * but the define is needed for a generic inline function.)
88222diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
88223index 4d46085..f4e92ef 100644
88224--- a/include/asm-generic/pgtable.h
88225+++ b/include/asm-generic/pgtable.h
88226@@ -689,6 +689,22 @@ static inline int pmd_protnone(pmd_t pmd)
88227 }
88228 #endif /* CONFIG_NUMA_BALANCING */
88229
88230+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
88231+#ifdef CONFIG_PAX_KERNEXEC
88232+#error KERNEXEC requires pax_open_kernel
88233+#else
88234+static inline unsigned long pax_open_kernel(void) { return 0; }
88235+#endif
88236+#endif
88237+
88238+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
88239+#ifdef CONFIG_PAX_KERNEXEC
88240+#error KERNEXEC requires pax_close_kernel
88241+#else
88242+static inline unsigned long pax_close_kernel(void) { return 0; }
88243+#endif
88244+#endif
88245+
88246 #endif /* CONFIG_MMU */
88247
88248 #endif /* !__ASSEMBLY__ */
88249diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
88250index 72d8803..cb9749c 100644
88251--- a/include/asm-generic/uaccess.h
88252+++ b/include/asm-generic/uaccess.h
88253@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
88254 return __clear_user(to, n);
88255 }
88256
88257+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
88258+#ifdef CONFIG_PAX_MEMORY_UDEREF
88259+#error UDEREF requires pax_open_userland
88260+#else
88261+static inline unsigned long pax_open_userland(void) { return 0; }
88262+#endif
88263+#endif
88264+
88265+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
88266+#ifdef CONFIG_PAX_MEMORY_UDEREF
88267+#error UDEREF requires pax_close_userland
88268+#else
88269+static inline unsigned long pax_close_userland(void) { return 0; }
88270+#endif
88271+#endif
88272+
88273 #endif /* __ASM_GENERIC_UACCESS_H */
88274diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
88275index ac78910..8b5f068 100644
88276--- a/include/asm-generic/vmlinux.lds.h
88277+++ b/include/asm-generic/vmlinux.lds.h
88278@@ -234,6 +234,7 @@
88279 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
88280 VMLINUX_SYMBOL(__start_rodata) = .; \
88281 *(.rodata) *(.rodata.*) \
88282+ *(.data..read_only) \
88283 *(__vermagic) /* Kernel version magic */ \
88284 . = ALIGN(8); \
88285 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
88286@@ -492,6 +493,7 @@
88287 KERNEL_CTORS() \
88288 MCOUNT_REC() \
88289 *(.init.rodata) \
88290+ *(.init.rodata.*) \
88291 FTRACE_EVENTS() \
88292 TRACE_SYSCALLS() \
88293 KPROBE_BLACKLIST() \
88294@@ -511,6 +513,8 @@
88295
88296 #define EXIT_DATA \
88297 *(.exit.data) \
88298+ *(.exit.rodata) \
88299+ *(.exit.rodata.*) \
88300 MEM_DISCARD(exit.data) \
88301 MEM_DISCARD(exit.rodata)
88302
88303@@ -727,17 +731,18 @@
88304 * section in the linker script will go there too. @phdr should have
88305 * a leading colon.
88306 *
88307- * Note that this macros defines __per_cpu_load as an absolute symbol.
88308+ * Note that this macros defines per_cpu_load as an absolute symbol.
88309 * If there is no need to put the percpu section at a predetermined
88310 * address, use PERCPU_SECTION.
88311 */
88312 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
88313- VMLINUX_SYMBOL(__per_cpu_load) = .; \
88314- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
88315+ per_cpu_load = .; \
88316+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
88317 - LOAD_OFFSET) { \
88318+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
88319 PERCPU_INPUT(cacheline) \
88320 } phdr \
88321- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
88322+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
88323
88324 /**
88325 * PERCPU_SECTION - define output section for percpu area, simple version
88326diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
88327index 623a59c..1e79ab9 100644
88328--- a/include/crypto/algapi.h
88329+++ b/include/crypto/algapi.h
88330@@ -34,7 +34,7 @@ struct crypto_type {
88331 unsigned int maskclear;
88332 unsigned int maskset;
88333 unsigned int tfmsize;
88334-};
88335+} __do_const;
88336
88337 struct crypto_instance {
88338 struct crypto_alg alg;
88339diff --git a/include/drm/drmP.h b/include/drm/drmP.h
88340index e928625..ff97886 100644
88341--- a/include/drm/drmP.h
88342+++ b/include/drm/drmP.h
88343@@ -59,6 +59,7 @@
88344
88345 #include <asm/mman.h>
88346 #include <asm/pgalloc.h>
88347+#include <asm/local.h>
88348 #include <asm/uaccess.h>
88349
88350 #include <uapi/drm/drm.h>
88351@@ -133,17 +134,18 @@ void drm_err(const char *format, ...);
88352 /*@{*/
88353
88354 /* driver capabilities and requirements mask */
88355-#define DRIVER_USE_AGP 0x1
88356-#define DRIVER_PCI_DMA 0x8
88357-#define DRIVER_SG 0x10
88358-#define DRIVER_HAVE_DMA 0x20
88359-#define DRIVER_HAVE_IRQ 0x40
88360-#define DRIVER_IRQ_SHARED 0x80
88361-#define DRIVER_GEM 0x1000
88362-#define DRIVER_MODESET 0x2000
88363-#define DRIVER_PRIME 0x4000
88364-#define DRIVER_RENDER 0x8000
88365-#define DRIVER_ATOMIC 0x10000
88366+#define DRIVER_USE_AGP 0x1
88367+#define DRIVER_PCI_DMA 0x8
88368+#define DRIVER_SG 0x10
88369+#define DRIVER_HAVE_DMA 0x20
88370+#define DRIVER_HAVE_IRQ 0x40
88371+#define DRIVER_IRQ_SHARED 0x80
88372+#define DRIVER_GEM 0x1000
88373+#define DRIVER_MODESET 0x2000
88374+#define DRIVER_PRIME 0x4000
88375+#define DRIVER_RENDER 0x8000
88376+#define DRIVER_ATOMIC 0x10000
88377+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
88378
88379 /***********************************************************************/
88380 /** \name Macros to make printk easier */
88381@@ -224,10 +226,12 @@ void drm_err(const char *format, ...);
88382 * \param cmd command.
88383 * \param arg argument.
88384 */
88385-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
88386+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
88387+ struct drm_file *file_priv);
88388+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
88389 struct drm_file *file_priv);
88390
88391-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
88392+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
88393 unsigned long arg);
88394
88395 #define DRM_IOCTL_NR(n) _IOC_NR(n)
88396@@ -243,10 +247,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
88397 struct drm_ioctl_desc {
88398 unsigned int cmd;
88399 int flags;
88400- drm_ioctl_t *func;
88401+ drm_ioctl_t func;
88402 unsigned int cmd_drv;
88403 const char *name;
88404-};
88405+} __do_const;
88406
88407 /**
88408 * Creates a driver or general drm_ioctl_desc array entry for the given
88409@@ -632,7 +636,8 @@ struct drm_info_list {
88410 int (*show)(struct seq_file*, void*); /** show callback */
88411 u32 driver_features; /**< Required driver features for this entry */
88412 void *data;
88413-};
88414+} __do_const;
88415+typedef struct drm_info_list __no_const drm_info_list_no_const;
88416
88417 /**
88418 * debugfs node structure. This structure represents a debugfs file.
88419@@ -716,7 +721,7 @@ struct drm_device {
88420
88421 /** \name Usage Counters */
88422 /*@{ */
88423- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
88424+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
88425 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
88426 int buf_use; /**< Buffers in use -- cannot alloc */
88427 atomic_t buf_alloc; /**< Buffer allocation in progress */
88428diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
88429index c250a22..59d2094 100644
88430--- a/include/drm/drm_crtc_helper.h
88431+++ b/include/drm/drm_crtc_helper.h
88432@@ -160,7 +160,7 @@ struct drm_encoder_helper_funcs {
88433 int (*atomic_check)(struct drm_encoder *encoder,
88434 struct drm_crtc_state *crtc_state,
88435 struct drm_connector_state *conn_state);
88436-};
88437+} __no_const;
88438
88439 /**
88440 * struct drm_connector_helper_funcs - helper operations for connectors
88441diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
88442index d016dc5..3951fe0 100644
88443--- a/include/drm/i915_pciids.h
88444+++ b/include/drm/i915_pciids.h
88445@@ -37,7 +37,7 @@
88446 */
88447 #define INTEL_VGA_DEVICE(id, info) { \
88448 0x8086, id, \
88449- ~0, ~0, \
88450+ PCI_ANY_ID, PCI_ANY_ID, \
88451 0x030000, 0xff0000, \
88452 (unsigned long) info }
88453
88454diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
88455index 72dcbe8..8db58d7 100644
88456--- a/include/drm/ttm/ttm_memory.h
88457+++ b/include/drm/ttm/ttm_memory.h
88458@@ -48,7 +48,7 @@
88459
88460 struct ttm_mem_shrink {
88461 int (*do_shrink) (struct ttm_mem_shrink *);
88462-};
88463+} __no_const;
88464
88465 /**
88466 * struct ttm_mem_global - Global memory accounting structure.
88467diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
88468index 49a8284..9643967 100644
88469--- a/include/drm/ttm/ttm_page_alloc.h
88470+++ b/include/drm/ttm/ttm_page_alloc.h
88471@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
88472 */
88473 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
88474
88475+struct device;
88476 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
88477 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
88478
88479diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
88480index 4b840e8..155d235 100644
88481--- a/include/keys/asymmetric-subtype.h
88482+++ b/include/keys/asymmetric-subtype.h
88483@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
88484 /* Verify the signature on a key of this subtype (optional) */
88485 int (*verify_signature)(const struct key *key,
88486 const struct public_key_signature *sig);
88487-};
88488+} __do_const;
88489
88490 /**
88491 * asymmetric_key_subtype - Get the subtype from an asymmetric key
88492diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
88493index c1da539..1dcec55 100644
88494--- a/include/linux/atmdev.h
88495+++ b/include/linux/atmdev.h
88496@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
88497 #endif
88498
88499 struct k_atm_aal_stats {
88500-#define __HANDLE_ITEM(i) atomic_t i
88501+#define __HANDLE_ITEM(i) atomic_unchecked_t i
88502 __AAL_STAT_ITEMS
88503 #undef __HANDLE_ITEM
88504 };
88505@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
88506 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
88507 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
88508 struct module *owner;
88509-};
88510+} __do_const ;
88511
88512 struct atmphy_ops {
88513 int (*start)(struct atm_dev *dev);
88514diff --git a/include/linux/atomic.h b/include/linux/atomic.h
88515index 5b08a85..60922fb 100644
88516--- a/include/linux/atomic.h
88517+++ b/include/linux/atomic.h
88518@@ -12,7 +12,7 @@
88519 * Atomically adds @a to @v, so long as @v was not already @u.
88520 * Returns non-zero if @v was not @u, and zero otherwise.
88521 */
88522-static inline int atomic_add_unless(atomic_t *v, int a, int u)
88523+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
88524 {
88525 return __atomic_add_unless(v, a, u) != u;
88526 }
88527diff --git a/include/linux/audit.h b/include/linux/audit.h
88528index c2e7e3a..8bfc0e1 100644
88529--- a/include/linux/audit.h
88530+++ b/include/linux/audit.h
88531@@ -223,7 +223,7 @@ static inline void audit_ptrace(struct task_struct *t)
88532 extern unsigned int audit_serial(void);
88533 extern int auditsc_get_stamp(struct audit_context *ctx,
88534 struct timespec *t, unsigned int *serial);
88535-extern int audit_set_loginuid(kuid_t loginuid);
88536+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
88537
88538 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
88539 {
88540diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
88541index 576e463..28fd926 100644
88542--- a/include/linux/binfmts.h
88543+++ b/include/linux/binfmts.h
88544@@ -44,7 +44,7 @@ struct linux_binprm {
88545 unsigned interp_flags;
88546 unsigned interp_data;
88547 unsigned long loader, exec;
88548-};
88549+} __randomize_layout;
88550
88551 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
88552 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
88553@@ -77,8 +77,10 @@ struct linux_binfmt {
88554 int (*load_binary)(struct linux_binprm *);
88555 int (*load_shlib)(struct file *);
88556 int (*core_dump)(struct coredump_params *cprm);
88557+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
88558+ void (*handle_mmap)(struct file *);
88559 unsigned long min_coredump; /* minimal dump size */
88560-};
88561+} __do_const __randomize_layout;
88562
88563 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
88564
88565diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
88566index dbfbf49..10be372 100644
88567--- a/include/linux/bitmap.h
88568+++ b/include/linux/bitmap.h
88569@@ -299,7 +299,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
88570 return __bitmap_full(src, nbits);
88571 }
88572
88573-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
88574+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
88575 {
88576 if (small_const_nbits(nbits))
88577 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
88578diff --git a/include/linux/bitops.h b/include/linux/bitops.h
88579index 5d858e0..336c1d9 100644
88580--- a/include/linux/bitops.h
88581+++ b/include/linux/bitops.h
88582@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
88583 * @word: value to rotate
88584 * @shift: bits to roll
88585 */
88586-static inline __u32 rol32(__u32 word, unsigned int shift)
88587+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
88588 {
88589 return (word << shift) | (word >> (32 - shift));
88590 }
88591@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
88592 * @word: value to rotate
88593 * @shift: bits to roll
88594 */
88595-static inline __u32 ror32(__u32 word, unsigned int shift)
88596+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
88597 {
88598 return (word >> shift) | (word << (32 - shift));
88599 }
88600@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
88601 return (__s32)(value << shift) >> shift;
88602 }
88603
88604-static inline unsigned fls_long(unsigned long l)
88605+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
88606 {
88607 if (sizeof(l) == 4)
88608 return fls(l);
88609diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
88610index 7f9a516..8889453 100644
88611--- a/include/linux/blkdev.h
88612+++ b/include/linux/blkdev.h
88613@@ -1616,7 +1616,7 @@ struct block_device_operations {
88614 /* this callback is with swap_lock and sometimes page table lock held */
88615 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
88616 struct module *owner;
88617-};
88618+} __do_const;
88619
88620 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
88621 unsigned long);
88622diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
88623index afc1343..9735539 100644
88624--- a/include/linux/blktrace_api.h
88625+++ b/include/linux/blktrace_api.h
88626@@ -25,7 +25,7 @@ struct blk_trace {
88627 struct dentry *dropped_file;
88628 struct dentry *msg_file;
88629 struct list_head running_list;
88630- atomic_t dropped;
88631+ atomic_unchecked_t dropped;
88632 };
88633
88634 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
88635diff --git a/include/linux/cache.h b/include/linux/cache.h
88636index 17e7e82..1d7da26 100644
88637--- a/include/linux/cache.h
88638+++ b/include/linux/cache.h
88639@@ -16,6 +16,14 @@
88640 #define __read_mostly
88641 #endif
88642
88643+#ifndef __read_only
88644+#ifdef CONFIG_PAX_KERNEXEC
88645+#error KERNEXEC requires __read_only
88646+#else
88647+#define __read_only __read_mostly
88648+#endif
88649+#endif
88650+
88651 #ifndef ____cacheline_aligned
88652 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
88653 #endif
88654diff --git a/include/linux/capability.h b/include/linux/capability.h
88655index aa93e5e..985a1b0 100644
88656--- a/include/linux/capability.h
88657+++ b/include/linux/capability.h
88658@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
88659 extern bool capable(int cap);
88660 extern bool ns_capable(struct user_namespace *ns, int cap);
88661 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
88662+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
88663 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
88664+extern bool capable_nolog(int cap);
88665+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
88666
88667 /* audit system wants to get cap info from files as well */
88668 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
88669
88670+extern int is_privileged_binary(const struct dentry *dentry);
88671+
88672 #endif /* !_LINUX_CAPABILITY_H */
88673diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
88674index 8609d57..86e4d79 100644
88675--- a/include/linux/cdrom.h
88676+++ b/include/linux/cdrom.h
88677@@ -87,7 +87,6 @@ struct cdrom_device_ops {
88678
88679 /* driver specifications */
88680 const int capability; /* capability flags */
88681- int n_minors; /* number of active minor devices */
88682 /* handle uniform packets for scsi type devices (scsi,atapi) */
88683 int (*generic_packet) (struct cdrom_device_info *,
88684 struct packet_command *);
88685diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
88686index 4ce9056..86caac6 100644
88687--- a/include/linux/cleancache.h
88688+++ b/include/linux/cleancache.h
88689@@ -31,7 +31,7 @@ struct cleancache_ops {
88690 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
88691 void (*invalidate_inode)(int, struct cleancache_filekey);
88692 void (*invalidate_fs)(int);
88693-};
88694+} __no_const;
88695
88696 extern struct cleancache_ops *
88697 cleancache_register_ops(struct cleancache_ops *ops);
88698diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
88699index 5591ea7..61b77ce 100644
88700--- a/include/linux/clk-provider.h
88701+++ b/include/linux/clk-provider.h
88702@@ -195,6 +195,7 @@ struct clk_ops {
88703 void (*init)(struct clk_hw *hw);
88704 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
88705 };
88706+typedef struct clk_ops __no_const clk_ops_no_const;
88707
88708 /**
88709 * struct clk_init_data - holds init data that's common to all clocks and is
88710diff --git a/include/linux/compat.h b/include/linux/compat.h
88711index ab25814..d1540d1 100644
88712--- a/include/linux/compat.h
88713+++ b/include/linux/compat.h
88714@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
88715 compat_size_t __user *len_ptr);
88716
88717 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
88718-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
88719+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
88720 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
88721 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
88722 compat_ssize_t msgsz, int msgflg);
88723@@ -325,7 +325,7 @@ asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
88724 long compat_sys_msgctl(int first, int second, void __user *uptr);
88725 long compat_sys_shmctl(int first, int second, void __user *uptr);
88726 long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
88727- unsigned nsems, const struct compat_timespec __user *timeout);
88728+ compat_long_t nsems, const struct compat_timespec __user *timeout);
88729 asmlinkage long compat_sys_keyctl(u32 option,
88730 u32 arg2, u32 arg3, u32 arg4, u32 arg5);
88731 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
88732@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
88733 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
88734 compat_ulong_t addr, compat_ulong_t data);
88735 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
88736- compat_long_t addr, compat_long_t data);
88737+ compat_ulong_t addr, compat_ulong_t data);
88738
88739 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
88740 /*
88741diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
88742index cdf13ca..ba5e086 100644
88743--- a/include/linux/compiler-gcc.h
88744+++ b/include/linux/compiler-gcc.h
88745@@ -94,8 +94,8 @@
88746 */
88747 #define __pure __attribute__((pure))
88748 #define __aligned(x) __attribute__((aligned(x)))
88749-#define __printf(a, b) __attribute__((format(printf, a, b)))
88750-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
88751+#define __printf(a, b) __attribute__((format(printf, a, b))) __nocapture(a, b)
88752+#define __scanf(a, b) __attribute__((format(scanf, a, b))) __nocapture(a, b)
88753 #define noinline __attribute__((noinline))
88754 #define __attribute_const__ __attribute__((__const__))
88755 #define __maybe_unused __attribute__((unused))
88756diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
88757index 769e198..7ee7cb5 100644
88758--- a/include/linux/compiler-gcc4.h
88759+++ b/include/linux/compiler-gcc4.h
88760@@ -39,9 +39,38 @@
88761 # define __compiletime_warning(message) __attribute__((warning(message)))
88762 # define __compiletime_error(message) __attribute__((error(message)))
88763 #endif /* __CHECKER__ */
88764+
88765+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
88766+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
88767+#define __bos0(ptr) __bos((ptr), 0)
88768+#define __bos1(ptr) __bos((ptr), 1)
88769 #endif /* GCC_VERSION >= 40300 */
88770
88771 #if GCC_VERSION >= 40500
88772+
88773+#ifdef RANDSTRUCT_PLUGIN
88774+#define __randomize_layout __attribute__((randomize_layout))
88775+#define __no_randomize_layout __attribute__((no_randomize_layout))
88776+#endif
88777+
88778+#ifdef CONSTIFY_PLUGIN
88779+#define __no_const __attribute__((no_const))
88780+#define __do_const __attribute__((do_const))
88781+#endif
88782+
88783+#ifdef SIZE_OVERFLOW_PLUGIN
88784+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
88785+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
88786+#endif
88787+
88788+#ifdef LATENT_ENTROPY_PLUGIN
88789+#define __latent_entropy __attribute__((latent_entropy))
88790+#endif
88791+
88792+#ifdef INITIFY_PLUGIN
88793+#define __nocapture(...) __attribute__((nocapture(__VA_ARGS__)))
88794+#endif
88795+
88796 /*
88797 * Mark a position in code as unreachable. This can be used to
88798 * suppress control flow warnings after asm blocks that transfer
88799diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
88800index efee493..8aa8f6b 100644
88801--- a/include/linux/compiler-gcc5.h
88802+++ b/include/linux/compiler-gcc5.h
88803@@ -28,6 +28,34 @@
88804 # define __compiletime_error(message) __attribute__((error(message)))
88805 #endif /* __CHECKER__ */
88806
88807+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
88808+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
88809+#define __bos0(ptr) __bos((ptr), 0)
88810+#define __bos1(ptr) __bos((ptr), 1)
88811+
88812+#ifdef RANDSTRUCT_PLUGIN
88813+#define __randomize_layout __attribute__((randomize_layout))
88814+#define __no_randomize_layout __attribute__((no_randomize_layout))
88815+#endif
88816+
88817+#ifdef CONSTIFY_PLUGIN
88818+#define __no_const __attribute__((no_const))
88819+#define __do_const __attribute__((do_const))
88820+#endif
88821+
88822+#ifdef SIZE_OVERFLOW_PLUGIN
88823+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
88824+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
88825+#endif
88826+
88827+#ifdef LATENT_ENTROPY_PLUGIN
88828+#define __latent_entropy __attribute__((latent_entropy))
88829+#endif
88830+
88831+#ifdef INITIFY_PLUGIN
88832+#define __nocapture(...) __attribute__((nocapture(__VA_ARGS__)))
88833+#endif
88834+
88835 /*
88836 * Mark a position in code as unreachable. This can be used to
88837 * suppress control flow warnings after asm blocks that transfer
88838diff --git a/include/linux/compiler.h b/include/linux/compiler.h
88839index 1b45e4a..eff29a7 100644
88840--- a/include/linux/compiler.h
88841+++ b/include/linux/compiler.h
88842@@ -5,11 +5,14 @@
88843
88844 #ifdef __CHECKER__
88845 # define __user __attribute__((noderef, address_space(1)))
88846+# define __force_user __force __user
88847 # define __kernel __attribute__((address_space(0)))
88848+# define __force_kernel __force __kernel
88849 # define __safe __attribute__((safe))
88850 # define __force __attribute__((force))
88851 # define __nocast __attribute__((nocast))
88852 # define __iomem __attribute__((noderef, address_space(2)))
88853+# define __force_iomem __force __iomem
88854 # define __must_hold(x) __attribute__((context(x,1,1)))
88855 # define __acquires(x) __attribute__((context(x,0,1)))
88856 # define __releases(x) __attribute__((context(x,1,0)))
88857@@ -17,20 +20,37 @@
88858 # define __release(x) __context__(x,-1)
88859 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
88860 # define __percpu __attribute__((noderef, address_space(3)))
88861+# define __force_percpu __force __percpu
88862 #ifdef CONFIG_SPARSE_RCU_POINTER
88863 # define __rcu __attribute__((noderef, address_space(4)))
88864+# define __force_rcu __force __rcu
88865 #else
88866 # define __rcu
88867+# define __force_rcu
88868 #endif
88869 extern void __chk_user_ptr(const volatile void __user *);
88870 extern void __chk_io_ptr(const volatile void __iomem *);
88871 #else
88872-# define __user
88873-# define __kernel
88874+# ifdef CHECKER_PLUGIN
88875+//# define __user
88876+//# define __force_user
88877+//# define __kernel
88878+//# define __force_kernel
88879+# else
88880+# ifdef STRUCTLEAK_PLUGIN
88881+# define __user __attribute__((user))
88882+# else
88883+# define __user
88884+# endif
88885+# define __force_user
88886+# define __kernel
88887+# define __force_kernel
88888+# endif
88889 # define __safe
88890 # define __force
88891 # define __nocast
88892 # define __iomem
88893+# define __force_iomem
88894 # define __chk_user_ptr(x) (void)0
88895 # define __chk_io_ptr(x) (void)0
88896 # define __builtin_warning(x, y...) (1)
88897@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
88898 # define __release(x) (void)0
88899 # define __cond_lock(x,c) (c)
88900 # define __percpu
88901+# define __force_percpu
88902 # define __rcu
88903+# define __force_rcu
88904 #endif
88905
88906 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
88907@@ -205,32 +227,32 @@ static __always_inline void data_access_exceeds_word_size(void)
88908 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
88909 {
88910 switch (size) {
88911- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
88912- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
88913- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
88914+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
88915+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
88916+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
88917 #ifdef CONFIG_64BIT
88918- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
88919+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
88920 #endif
88921 default:
88922 barrier();
88923- __builtin_memcpy((void *)res, (const void *)p, size);
88924+ __builtin_memcpy(res, (const void *)p, size);
88925 data_access_exceeds_word_size();
88926 barrier();
88927 }
88928 }
88929
88930-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
88931+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
88932 {
88933 switch (size) {
88934- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
88935- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
88936- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
88937+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
88938+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
88939+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
88940 #ifdef CONFIG_64BIT
88941- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
88942+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
88943 #endif
88944 default:
88945 barrier();
88946- __builtin_memcpy((void *)p, (const void *)res, size);
88947+ __builtin_memcpy((void *)p, res, size);
88948 data_access_exceeds_word_size();
88949 barrier();
88950 }
88951@@ -364,6 +386,38 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
88952 # define __attribute_const__ /* unimplemented */
88953 #endif
88954
88955+#ifndef __randomize_layout
88956+# define __randomize_layout
88957+#endif
88958+
88959+#ifndef __no_randomize_layout
88960+# define __no_randomize_layout
88961+#endif
88962+
88963+#ifndef __no_const
88964+# define __no_const
88965+#endif
88966+
88967+#ifndef __do_const
88968+# define __do_const
88969+#endif
88970+
88971+#ifndef __size_overflow
88972+# define __size_overflow(...)
88973+#endif
88974+
88975+#ifndef __intentional_overflow
88976+# define __intentional_overflow(...)
88977+#endif
88978+
88979+#ifndef __latent_entropy
88980+# define __latent_entropy
88981+#endif
88982+
88983+#ifndef __nocapture
88984+# define __nocapture(...)
88985+#endif
88986+
88987 /*
88988 * Tell gcc if a function is cold. The compiler will assume any path
88989 * directly leading to the call is unlikely.
88990@@ -373,6 +427,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
88991 #define __cold
88992 #endif
88993
88994+#ifndef __alloc_size
88995+#define __alloc_size(...)
88996+#endif
88997+
88998+#ifndef __bos
88999+#define __bos(ptr, arg)
89000+#endif
89001+
89002+#ifndef __bos0
89003+#define __bos0(ptr)
89004+#endif
89005+
89006+#ifndef __bos1
89007+#define __bos1(ptr)
89008+#endif
89009+
89010 /* Simple shorthand for a section definition */
89011 #ifndef __section
89012 # define __section(S) __attribute__ ((__section__(#S)))
89013@@ -387,6 +457,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
89014 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
89015 #endif
89016
89017+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
89018+
89019 /* Is this type a native word size -- useful for atomic operations */
89020 #ifndef __native_word
89021 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
89022@@ -466,8 +538,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
89023 */
89024 #define __ACCESS_ONCE(x) ({ \
89025 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
89026- (volatile typeof(x) *)&(x); })
89027+ (volatile const typeof(x) *)&(x); })
89028 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
89029+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
89030
89031 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
89032 #ifdef CONFIG_KPROBES
89033diff --git a/include/linux/completion.h b/include/linux/completion.h
89034index 5d5aaae..0ea9b84 100644
89035--- a/include/linux/completion.h
89036+++ b/include/linux/completion.h
89037@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
89038
89039 extern void wait_for_completion(struct completion *);
89040 extern void wait_for_completion_io(struct completion *);
89041-extern int wait_for_completion_interruptible(struct completion *x);
89042-extern int wait_for_completion_killable(struct completion *x);
89043+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
89044+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
89045 extern unsigned long wait_for_completion_timeout(struct completion *x,
89046- unsigned long timeout);
89047+ unsigned long timeout) __intentional_overflow(-1);
89048 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
89049- unsigned long timeout);
89050+ unsigned long timeout) __intentional_overflow(-1);
89051 extern long wait_for_completion_interruptible_timeout(
89052- struct completion *x, unsigned long timeout);
89053+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
89054 extern long wait_for_completion_killable_timeout(
89055- struct completion *x, unsigned long timeout);
89056+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
89057 extern bool try_wait_for_completion(struct completion *x);
89058 extern bool completion_done(struct completion *x);
89059
89060diff --git a/include/linux/configfs.h b/include/linux/configfs.h
89061index 34025df..d94bbbc 100644
89062--- a/include/linux/configfs.h
89063+++ b/include/linux/configfs.h
89064@@ -125,7 +125,7 @@ struct configfs_attribute {
89065 const char *ca_name;
89066 struct module *ca_owner;
89067 umode_t ca_mode;
89068-};
89069+} __do_const;
89070
89071 /*
89072 * Users often need to create attribute structures for their configurable
89073diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
89074index 2ee4888..0451f5e 100644
89075--- a/include/linux/cpufreq.h
89076+++ b/include/linux/cpufreq.h
89077@@ -207,6 +207,7 @@ struct global_attr {
89078 ssize_t (*store)(struct kobject *a, struct attribute *b,
89079 const char *c, size_t count);
89080 };
89081+typedef struct global_attr __no_const global_attr_no_const;
89082
89083 #define define_one_global_ro(_name) \
89084 static struct global_attr _name = \
89085@@ -278,7 +279,7 @@ struct cpufreq_driver {
89086 bool boost_supported;
89087 bool boost_enabled;
89088 int (*set_boost)(int state);
89089-};
89090+} __do_const;
89091
89092 /* flags */
89093 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
89094diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
89095index 9c5e892..feb34e0 100644
89096--- a/include/linux/cpuidle.h
89097+++ b/include/linux/cpuidle.h
89098@@ -59,7 +59,8 @@ struct cpuidle_state {
89099 void (*enter_freeze) (struct cpuidle_device *dev,
89100 struct cpuidle_driver *drv,
89101 int index);
89102-};
89103+} __do_const;
89104+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
89105
89106 /* Idle State Flags */
89107 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
89108@@ -227,7 +228,7 @@ struct cpuidle_governor {
89109 void (*reflect) (struct cpuidle_device *dev, int index);
89110
89111 struct module *owner;
89112-};
89113+} __do_const;
89114
89115 #ifdef CONFIG_CPU_IDLE
89116 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
89117diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
89118index 086549a..a572d94 100644
89119--- a/include/linux/cpumask.h
89120+++ b/include/linux/cpumask.h
89121@@ -126,17 +126,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
89122 }
89123
89124 /* Valid inputs for n are -1 and 0. */
89125-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
89126+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
89127 {
89128 return n+1;
89129 }
89130
89131-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
89132+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
89133 {
89134 return n+1;
89135 }
89136
89137-static inline unsigned int cpumask_next_and(int n,
89138+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
89139 const struct cpumask *srcp,
89140 const struct cpumask *andp)
89141 {
89142@@ -182,7 +182,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
89143 *
89144 * Returns >= nr_cpu_ids if no further cpus set.
89145 */
89146-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
89147+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
89148 {
89149 /* -1 is a legal arg here. */
89150 if (n != -1)
89151@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
89152 *
89153 * Returns >= nr_cpu_ids if no further cpus unset.
89154 */
89155-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
89156+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
89157 {
89158 /* -1 is a legal arg here. */
89159 if (n != -1)
89160@@ -205,7 +205,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
89161 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
89162 }
89163
89164-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
89165+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
89166 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
89167 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
89168
89169@@ -472,7 +472,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
89170 * cpumask_weight - Count of bits in *srcp
89171 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
89172 */
89173-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
89174+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
89175 {
89176 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
89177 }
89178diff --git a/include/linux/cred.h b/include/linux/cred.h
89179index 2fb2ca2..d6a3340 100644
89180--- a/include/linux/cred.h
89181+++ b/include/linux/cred.h
89182@@ -35,7 +35,7 @@ struct group_info {
89183 int nblocks;
89184 kgid_t small_block[NGROUPS_SMALL];
89185 kgid_t *blocks[0];
89186-};
89187+} __randomize_layout;
89188
89189 /**
89190 * get_group_info - Get a reference to a group info structure
89191@@ -137,7 +137,7 @@ struct cred {
89192 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
89193 struct group_info *group_info; /* supplementary groups for euid/fsgid */
89194 struct rcu_head rcu; /* RCU deletion hook */
89195-};
89196+} __randomize_layout;
89197
89198 extern void __put_cred(struct cred *);
89199 extern void exit_creds(struct task_struct *);
89200@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
89201 static inline void validate_process_creds(void)
89202 {
89203 }
89204+static inline void validate_task_creds(struct task_struct *task)
89205+{
89206+}
89207 #endif
89208
89209 /**
89210@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
89211
89212 #define task_uid(task) (task_cred_xxx((task), uid))
89213 #define task_euid(task) (task_cred_xxx((task), euid))
89214+#define task_securebits(task) (task_cred_xxx((task), securebits))
89215
89216 #define current_cred_xxx(xxx) \
89217 ({ \
89218diff --git a/include/linux/crypto.h b/include/linux/crypto.h
89219index fb5ef16..05d1e59 100644
89220--- a/include/linux/crypto.h
89221+++ b/include/linux/crypto.h
89222@@ -626,7 +626,7 @@ struct cipher_tfm {
89223 const u8 *key, unsigned int keylen);
89224 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
89225 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
89226-};
89227+} __no_const;
89228
89229 struct hash_tfm {
89230 int (*init)(struct hash_desc *desc);
89231@@ -647,13 +647,13 @@ struct compress_tfm {
89232 int (*cot_decompress)(struct crypto_tfm *tfm,
89233 const u8 *src, unsigned int slen,
89234 u8 *dst, unsigned int *dlen);
89235-};
89236+} __no_const;
89237
89238 struct rng_tfm {
89239 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
89240 unsigned int dlen);
89241 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
89242-};
89243+} __no_const;
89244
89245 #define crt_ablkcipher crt_u.ablkcipher
89246 #define crt_aead crt_u.aead
89247diff --git a/include/linux/ctype.h b/include/linux/ctype.h
89248index 653589e..4ef254a 100644
89249--- a/include/linux/ctype.h
89250+++ b/include/linux/ctype.h
89251@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
89252 * Fast implementation of tolower() for internal usage. Do not use in your
89253 * code.
89254 */
89255-static inline char _tolower(const char c)
89256+static inline unsigned char _tolower(const unsigned char c)
89257 {
89258 return c | 0x20;
89259 }
89260diff --git a/include/linux/dcache.h b/include/linux/dcache.h
89261index d835879..c8e5b92 100644
89262--- a/include/linux/dcache.h
89263+++ b/include/linux/dcache.h
89264@@ -123,6 +123,9 @@ struct dentry {
89265 unsigned long d_time; /* used by d_revalidate */
89266 void *d_fsdata; /* fs-specific data */
89267
89268+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
89269+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
89270+#endif
89271 struct list_head d_lru; /* LRU list */
89272 struct list_head d_child; /* child of parent list */
89273 struct list_head d_subdirs; /* our children */
89274@@ -133,7 +136,7 @@ struct dentry {
89275 struct hlist_node d_alias; /* inode alias list */
89276 struct rcu_head d_rcu;
89277 } d_u;
89278-};
89279+} __randomize_layout;
89280
89281 /*
89282 * dentry->d_lock spinlock nesting subclasses:
89283@@ -319,7 +322,7 @@ extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
89284
89285 static inline unsigned d_count(const struct dentry *dentry)
89286 {
89287- return dentry->d_lockref.count;
89288+ return __lockref_read(&dentry->d_lockref);
89289 }
89290
89291 /*
89292@@ -347,7 +350,7 @@ extern char *dentry_path(struct dentry *, char *, int);
89293 static inline struct dentry *dget_dlock(struct dentry *dentry)
89294 {
89295 if (dentry)
89296- dentry->d_lockref.count++;
89297+ __lockref_inc(&dentry->d_lockref);
89298 return dentry;
89299 }
89300
89301diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
89302index 7925bf0..d5143d2 100644
89303--- a/include/linux/decompress/mm.h
89304+++ b/include/linux/decompress/mm.h
89305@@ -77,7 +77,7 @@ static void free(void *where)
89306 * warnings when not needed (indeed large_malloc / large_free are not
89307 * needed by inflate */
89308
89309-#define malloc(a) kmalloc(a, GFP_KERNEL)
89310+#define malloc(a) kmalloc((a), GFP_KERNEL)
89311 #define free(a) kfree(a)
89312
89313 #define large_malloc(a) vmalloc(a)
89314diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
89315index ce447f0..83c66bd 100644
89316--- a/include/linux/devfreq.h
89317+++ b/include/linux/devfreq.h
89318@@ -114,7 +114,7 @@ struct devfreq_governor {
89319 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
89320 int (*event_handler)(struct devfreq *devfreq,
89321 unsigned int event, void *data);
89322-};
89323+} __do_const;
89324
89325 /**
89326 * struct devfreq - Device devfreq structure
89327diff --git a/include/linux/device.h b/include/linux/device.h
89328index 0eb8ee2..c603b6a 100644
89329--- a/include/linux/device.h
89330+++ b/include/linux/device.h
89331@@ -311,7 +311,7 @@ struct subsys_interface {
89332 struct list_head node;
89333 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
89334 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
89335-};
89336+} __do_const;
89337
89338 int subsys_interface_register(struct subsys_interface *sif);
89339 void subsys_interface_unregister(struct subsys_interface *sif);
89340@@ -507,7 +507,7 @@ struct device_type {
89341 void (*release)(struct device *dev);
89342
89343 const struct dev_pm_ops *pm;
89344-};
89345+} __do_const;
89346
89347 /* interface for exporting device attributes */
89348 struct device_attribute {
89349@@ -517,11 +517,12 @@ struct device_attribute {
89350 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
89351 const char *buf, size_t count);
89352 };
89353+typedef struct device_attribute __no_const device_attribute_no_const;
89354
89355 struct dev_ext_attribute {
89356 struct device_attribute attr;
89357 void *var;
89358-};
89359+} __do_const;
89360
89361 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
89362 char *buf);
89363diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
89364index c3007cb..43efc8c 100644
89365--- a/include/linux/dma-mapping.h
89366+++ b/include/linux/dma-mapping.h
89367@@ -60,7 +60,7 @@ struct dma_map_ops {
89368 u64 (*get_required_mask)(struct device *dev);
89369 #endif
89370 int is_phys;
89371-};
89372+} __do_const;
89373
89374 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
89375
89376diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
89377index b6997a0..108be6c 100644
89378--- a/include/linux/dmaengine.h
89379+++ b/include/linux/dmaengine.h
89380@@ -1133,9 +1133,9 @@ struct dma_pinned_list {
89381 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
89382 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
89383
89384-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
89385+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
89386 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
89387-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
89388+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
89389 struct dma_pinned_list *pinned_list, struct page *page,
89390 unsigned int offset, size_t len);
89391
89392diff --git a/include/linux/efi.h b/include/linux/efi.h
89393index cf7e431..d239dce 100644
89394--- a/include/linux/efi.h
89395+++ b/include/linux/efi.h
89396@@ -1056,6 +1056,7 @@ struct efivar_operations {
89397 efi_set_variable_nonblocking_t *set_variable_nonblocking;
89398 efi_query_variable_store_t *query_variable_store;
89399 };
89400+typedef struct efivar_operations __no_const efivar_operations_no_const;
89401
89402 struct efivars {
89403 /*
89404diff --git a/include/linux/elf.h b/include/linux/elf.h
89405index 20fa8d8..3d0dd18 100644
89406--- a/include/linux/elf.h
89407+++ b/include/linux/elf.h
89408@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
89409 #define elf_note elf32_note
89410 #define elf_addr_t Elf32_Off
89411 #define Elf_Half Elf32_Half
89412+#define elf_dyn Elf32_Dyn
89413
89414 #else
89415
89416@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
89417 #define elf_note elf64_note
89418 #define elf_addr_t Elf64_Off
89419 #define Elf_Half Elf64_Half
89420+#define elf_dyn Elf64_Dyn
89421
89422 #endif
89423
89424diff --git a/include/linux/err.h b/include/linux/err.h
89425index a729120..6ede2c9 100644
89426--- a/include/linux/err.h
89427+++ b/include/linux/err.h
89428@@ -20,12 +20,12 @@
89429
89430 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
89431
89432-static inline void * __must_check ERR_PTR(long error)
89433+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
89434 {
89435 return (void *) error;
89436 }
89437
89438-static inline long __must_check PTR_ERR(__force const void *ptr)
89439+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
89440 {
89441 return (long) ptr;
89442 }
89443diff --git a/include/linux/extcon.h b/include/linux/extcon.h
89444index 36f49c4..a2a1f4c 100644
89445--- a/include/linux/extcon.h
89446+++ b/include/linux/extcon.h
89447@@ -135,7 +135,7 @@ struct extcon_dev {
89448 /* /sys/class/extcon/.../mutually_exclusive/... */
89449 struct attribute_group attr_g_muex;
89450 struct attribute **attrs_muex;
89451- struct device_attribute *d_attrs_muex;
89452+ device_attribute_no_const *d_attrs_muex;
89453 };
89454
89455 /**
89456diff --git a/include/linux/fb.h b/include/linux/fb.h
89457index 043f328..180ccbf 100644
89458--- a/include/linux/fb.h
89459+++ b/include/linux/fb.h
89460@@ -305,7 +305,8 @@ struct fb_ops {
89461 /* called at KDB enter and leave time to prepare the console */
89462 int (*fb_debug_enter)(struct fb_info *info);
89463 int (*fb_debug_leave)(struct fb_info *info);
89464-};
89465+} __do_const;
89466+typedef struct fb_ops __no_const fb_ops_no_const;
89467
89468 #ifdef CONFIG_FB_TILEBLITTING
89469 #define FB_TILE_CURSOR_NONE 0
89470diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
89471index 230f87b..1fd0485 100644
89472--- a/include/linux/fdtable.h
89473+++ b/include/linux/fdtable.h
89474@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
89475 void put_files_struct(struct files_struct *fs);
89476 void reset_files_struct(struct files_struct *);
89477 int unshare_files(struct files_struct **);
89478-struct files_struct *dup_fd(struct files_struct *, int *);
89479+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
89480 void do_close_on_exec(struct files_struct *);
89481 int iterate_fd(struct files_struct *, unsigned,
89482 int (*)(const void *, struct file *, unsigned),
89483diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
89484index 8293262..2b3b8bd 100644
89485--- a/include/linux/frontswap.h
89486+++ b/include/linux/frontswap.h
89487@@ -11,7 +11,7 @@ struct frontswap_ops {
89488 int (*load)(unsigned, pgoff_t, struct page *);
89489 void (*invalidate_page)(unsigned, pgoff_t);
89490 void (*invalidate_area)(unsigned);
89491-};
89492+} __no_const;
89493
89494 extern bool frontswap_enabled;
89495 extern struct frontswap_ops *
89496diff --git a/include/linux/fs.h b/include/linux/fs.h
89497index 52cc449..31f35cb 100644
89498--- a/include/linux/fs.h
89499+++ b/include/linux/fs.h
89500@@ -410,7 +410,7 @@ struct address_space {
89501 spinlock_t private_lock; /* for use by the address_space */
89502 struct list_head private_list; /* ditto */
89503 void *private_data; /* ditto */
89504-} __attribute__((aligned(sizeof(long))));
89505+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
89506 /*
89507 * On most architectures that alignment is already the case; but
89508 * must be enforced here for CRIS, to let the least significant bit
89509@@ -453,7 +453,7 @@ struct block_device {
89510 int bd_fsfreeze_count;
89511 /* Mutex for freeze */
89512 struct mutex bd_fsfreeze_mutex;
89513-};
89514+} __randomize_layout;
89515
89516 /*
89517 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
89518@@ -639,7 +639,7 @@ struct inode {
89519 #endif
89520
89521 void *i_private; /* fs or device private pointer */
89522-};
89523+} __randomize_layout;
89524
89525 static inline int inode_unhashed(struct inode *inode)
89526 {
89527@@ -834,7 +834,7 @@ struct file {
89528 struct list_head f_tfile_llink;
89529 #endif /* #ifdef CONFIG_EPOLL */
89530 struct address_space *f_mapping;
89531-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
89532+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
89533
89534 struct file_handle {
89535 __u32 handle_bytes;
89536@@ -962,7 +962,7 @@ struct file_lock {
89537 int state; /* state of grant or error if -ve */
89538 } afs;
89539 } fl_u;
89540-};
89541+} __randomize_layout;
89542
89543 struct file_lock_context {
89544 spinlock_t flc_lock;
89545@@ -1316,7 +1316,7 @@ struct super_block {
89546 * Indicates how deep in a filesystem stack this SB is
89547 */
89548 int s_stack_depth;
89549-};
89550+} __randomize_layout;
89551
89552 extern struct timespec current_fs_time(struct super_block *sb);
89553
89554@@ -1570,7 +1570,8 @@ struct file_operations {
89555 #ifndef CONFIG_MMU
89556 unsigned (*mmap_capabilities)(struct file *);
89557 #endif
89558-};
89559+} __do_const __randomize_layout;
89560+typedef struct file_operations __no_const file_operations_no_const;
89561
89562 struct inode_operations {
89563 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
89564@@ -2269,7 +2270,7 @@ extern int register_chrdev_region(dev_t, unsigned, const char *);
89565 extern int __register_chrdev(unsigned int major, unsigned int baseminor,
89566 unsigned int count, const char *name,
89567 const struct file_operations *fops);
89568-extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
89569+extern __nocapture(4) void __unregister_chrdev(unsigned int major, unsigned int baseminor,
89570 unsigned int count, const char *name);
89571 extern void unregister_chrdev_region(dev_t, unsigned);
89572 extern void chrdev_show(struct seq_file *,off_t);
89573@@ -2918,4 +2919,14 @@ static inline bool dir_relax(struct inode *inode)
89574 return !IS_DEADDIR(inode);
89575 }
89576
89577+static inline bool is_sidechannel_device(const struct inode *inode)
89578+{
89579+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
89580+ umode_t mode = inode->i_mode;
89581+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
89582+#else
89583+ return false;
89584+#endif
89585+}
89586+
89587 #endif /* _LINUX_FS_H */
89588diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
89589index 0efc3e6..fd23610 100644
89590--- a/include/linux/fs_struct.h
89591+++ b/include/linux/fs_struct.h
89592@@ -6,13 +6,13 @@
89593 #include <linux/seqlock.h>
89594
89595 struct fs_struct {
89596- int users;
89597+ atomic_t users;
89598 spinlock_t lock;
89599 seqcount_t seq;
89600 int umask;
89601 int in_exec;
89602 struct path root, pwd;
89603-};
89604+} __randomize_layout;
89605
89606 extern struct kmem_cache *fs_cachep;
89607
89608diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
89609index 7714849..a4a5c7a 100644
89610--- a/include/linux/fscache-cache.h
89611+++ b/include/linux/fscache-cache.h
89612@@ -113,7 +113,7 @@ struct fscache_operation {
89613 fscache_operation_release_t release;
89614 };
89615
89616-extern atomic_t fscache_op_debug_id;
89617+extern atomic_unchecked_t fscache_op_debug_id;
89618 extern void fscache_op_work_func(struct work_struct *work);
89619
89620 extern void fscache_enqueue_operation(struct fscache_operation *);
89621@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
89622 INIT_WORK(&op->work, fscache_op_work_func);
89623 atomic_set(&op->usage, 1);
89624 op->state = FSCACHE_OP_ST_INITIALISED;
89625- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
89626+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
89627 op->processor = processor;
89628 op->release = release;
89629 INIT_LIST_HEAD(&op->pend_link);
89630diff --git a/include/linux/fscache.h b/include/linux/fscache.h
89631index 115bb81..e7b812b 100644
89632--- a/include/linux/fscache.h
89633+++ b/include/linux/fscache.h
89634@@ -152,7 +152,7 @@ struct fscache_cookie_def {
89635 * - this is mandatory for any object that may have data
89636 */
89637 void (*now_uncached)(void *cookie_netfs_data);
89638-};
89639+} __do_const;
89640
89641 /*
89642 * fscache cached network filesystem type
89643diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
89644index 7ee1774..72505b8 100644
89645--- a/include/linux/fsnotify.h
89646+++ b/include/linux/fsnotify.h
89647@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
89648 struct inode *inode = file_inode(file);
89649 __u32 mask = FS_ACCESS;
89650
89651+ if (is_sidechannel_device(inode))
89652+ return;
89653+
89654 if (S_ISDIR(inode->i_mode))
89655 mask |= FS_ISDIR;
89656
89657@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
89658 struct inode *inode = file_inode(file);
89659 __u32 mask = FS_MODIFY;
89660
89661+ if (is_sidechannel_device(inode))
89662+ return;
89663+
89664 if (S_ISDIR(inode->i_mode))
89665 mask |= FS_ISDIR;
89666
89667@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
89668 */
89669 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
89670 {
89671- return kstrdup(name, GFP_KERNEL);
89672+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
89673 }
89674
89675 /*
89676diff --git a/include/linux/genhd.h b/include/linux/genhd.h
89677index ec274e0..e678159 100644
89678--- a/include/linux/genhd.h
89679+++ b/include/linux/genhd.h
89680@@ -194,7 +194,7 @@ struct gendisk {
89681 struct kobject *slave_dir;
89682
89683 struct timer_rand_state *random;
89684- atomic_t sync_io; /* RAID */
89685+ atomic_unchecked_t sync_io; /* RAID */
89686 struct disk_events *ev;
89687 #ifdef CONFIG_BLK_DEV_INTEGRITY
89688 struct blk_integrity *integrity;
89689@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
89690 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
89691
89692 /* drivers/char/random.c */
89693-extern void add_disk_randomness(struct gendisk *disk);
89694+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
89695 extern void rand_initialize_disk(struct gendisk *disk);
89696
89697 static inline sector_t get_start_sect(struct block_device *bdev)
89698diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
89699index 667c311..abac2a7 100644
89700--- a/include/linux/genl_magic_func.h
89701+++ b/include/linux/genl_magic_func.h
89702@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
89703 },
89704
89705 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
89706-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
89707+static struct genl_ops ZZZ_genl_ops[] = {
89708 #include GENL_MAGIC_INCLUDE_FILE
89709 };
89710
89711diff --git a/include/linux/gfp.h b/include/linux/gfp.h
89712index 51bd1e7..0486343 100644
89713--- a/include/linux/gfp.h
89714+++ b/include/linux/gfp.h
89715@@ -34,6 +34,13 @@ struct vm_area_struct;
89716 #define ___GFP_NO_KSWAPD 0x400000u
89717 #define ___GFP_OTHER_NODE 0x800000u
89718 #define ___GFP_WRITE 0x1000000u
89719+
89720+#ifdef CONFIG_PAX_USERCOPY_SLABS
89721+#define ___GFP_USERCOPY 0x2000000u
89722+#else
89723+#define ___GFP_USERCOPY 0
89724+#endif
89725+
89726 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
89727
89728 /*
89729@@ -90,6 +97,7 @@ struct vm_area_struct;
89730 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
89731 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
89732 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
89733+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
89734
89735 /*
89736 * This may seem redundant, but it's a way of annotating false positives vs.
89737@@ -97,7 +105,7 @@ struct vm_area_struct;
89738 */
89739 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
89740
89741-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
89742+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
89743 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
89744
89745 /* This equals 0, but use constants in case they ever change */
89746@@ -152,6 +160,8 @@ struct vm_area_struct;
89747 /* 4GB DMA on some platforms */
89748 #define GFP_DMA32 __GFP_DMA32
89749
89750+#define GFP_USERCOPY __GFP_USERCOPY
89751+
89752 /* Convert GFP flags to their corresponding migrate type */
89753 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
89754 {
89755diff --git a/include/linux/gracl.h b/include/linux/gracl.h
89756new file mode 100644
89757index 0000000..91858e4
89758--- /dev/null
89759+++ b/include/linux/gracl.h
89760@@ -0,0 +1,342 @@
89761+#ifndef GR_ACL_H
89762+#define GR_ACL_H
89763+
89764+#include <linux/grdefs.h>
89765+#include <linux/resource.h>
89766+#include <linux/capability.h>
89767+#include <linux/dcache.h>
89768+#include <asm/resource.h>
89769+
89770+/* Major status information */
89771+
89772+#define GR_VERSION "grsecurity 3.1"
89773+#define GRSECURITY_VERSION 0x3100
89774+
89775+enum {
89776+ GR_SHUTDOWN = 0,
89777+ GR_ENABLE = 1,
89778+ GR_SPROLE = 2,
89779+ GR_OLDRELOAD = 3,
89780+ GR_SEGVMOD = 4,
89781+ GR_STATUS = 5,
89782+ GR_UNSPROLE = 6,
89783+ GR_PASSSET = 7,
89784+ GR_SPROLEPAM = 8,
89785+ GR_RELOAD = 9,
89786+};
89787+
89788+/* Password setup definitions
89789+ * kernel/grhash.c */
89790+enum {
89791+ GR_PW_LEN = 128,
89792+ GR_SALT_LEN = 16,
89793+ GR_SHA_LEN = 32,
89794+};
89795+
89796+enum {
89797+ GR_SPROLE_LEN = 64,
89798+};
89799+
89800+enum {
89801+ GR_NO_GLOB = 0,
89802+ GR_REG_GLOB,
89803+ GR_CREATE_GLOB
89804+};
89805+
89806+#define GR_NLIMITS 32
89807+
89808+/* Begin Data Structures */
89809+
89810+struct sprole_pw {
89811+ unsigned char *rolename;
89812+ unsigned char salt[GR_SALT_LEN];
89813+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
89814+};
89815+
89816+struct name_entry {
89817+ __u32 key;
89818+ u64 inode;
89819+ dev_t device;
89820+ char *name;
89821+ __u16 len;
89822+ __u8 deleted;
89823+ struct name_entry *prev;
89824+ struct name_entry *next;
89825+};
89826+
89827+struct inodev_entry {
89828+ struct name_entry *nentry;
89829+ struct inodev_entry *prev;
89830+ struct inodev_entry *next;
89831+};
89832+
89833+struct acl_role_db {
89834+ struct acl_role_label **r_hash;
89835+ __u32 r_size;
89836+};
89837+
89838+struct inodev_db {
89839+ struct inodev_entry **i_hash;
89840+ __u32 i_size;
89841+};
89842+
89843+struct name_db {
89844+ struct name_entry **n_hash;
89845+ __u32 n_size;
89846+};
89847+
89848+struct crash_uid {
89849+ uid_t uid;
89850+ unsigned long expires;
89851+};
89852+
89853+struct gr_hash_struct {
89854+ void **table;
89855+ void **nametable;
89856+ void *first;
89857+ __u32 table_size;
89858+ __u32 used_size;
89859+ int type;
89860+};
89861+
89862+/* Userspace Grsecurity ACL data structures */
89863+
89864+struct acl_subject_label {
89865+ char *filename;
89866+ u64 inode;
89867+ dev_t device;
89868+ __u32 mode;
89869+ kernel_cap_t cap_mask;
89870+ kernel_cap_t cap_lower;
89871+ kernel_cap_t cap_invert_audit;
89872+
89873+ struct rlimit res[GR_NLIMITS];
89874+ __u32 resmask;
89875+
89876+ __u8 user_trans_type;
89877+ __u8 group_trans_type;
89878+ uid_t *user_transitions;
89879+ gid_t *group_transitions;
89880+ __u16 user_trans_num;
89881+ __u16 group_trans_num;
89882+
89883+ __u32 sock_families[2];
89884+ __u32 ip_proto[8];
89885+ __u32 ip_type;
89886+ struct acl_ip_label **ips;
89887+ __u32 ip_num;
89888+ __u32 inaddr_any_override;
89889+
89890+ __u32 crashes;
89891+ unsigned long expires;
89892+
89893+ struct acl_subject_label *parent_subject;
89894+ struct gr_hash_struct *hash;
89895+ struct acl_subject_label *prev;
89896+ struct acl_subject_label *next;
89897+
89898+ struct acl_object_label **obj_hash;
89899+ __u32 obj_hash_size;
89900+ __u16 pax_flags;
89901+};
89902+
89903+struct role_allowed_ip {
89904+ __u32 addr;
89905+ __u32 netmask;
89906+
89907+ struct role_allowed_ip *prev;
89908+ struct role_allowed_ip *next;
89909+};
89910+
89911+struct role_transition {
89912+ char *rolename;
89913+
89914+ struct role_transition *prev;
89915+ struct role_transition *next;
89916+};
89917+
89918+struct acl_role_label {
89919+ char *rolename;
89920+ uid_t uidgid;
89921+ __u16 roletype;
89922+
89923+ __u16 auth_attempts;
89924+ unsigned long expires;
89925+
89926+ struct acl_subject_label *root_label;
89927+ struct gr_hash_struct *hash;
89928+
89929+ struct acl_role_label *prev;
89930+ struct acl_role_label *next;
89931+
89932+ struct role_transition *transitions;
89933+ struct role_allowed_ip *allowed_ips;
89934+ uid_t *domain_children;
89935+ __u16 domain_child_num;
89936+
89937+ umode_t umask;
89938+
89939+ struct acl_subject_label **subj_hash;
89940+ __u32 subj_hash_size;
89941+};
89942+
89943+struct user_acl_role_db {
89944+ struct acl_role_label **r_table;
89945+ __u32 num_pointers; /* Number of allocations to track */
89946+ __u32 num_roles; /* Number of roles */
89947+ __u32 num_domain_children; /* Number of domain children */
89948+ __u32 num_subjects; /* Number of subjects */
89949+ __u32 num_objects; /* Number of objects */
89950+};
89951+
89952+struct acl_object_label {
89953+ char *filename;
89954+ u64 inode;
89955+ dev_t device;
89956+ __u32 mode;
89957+
89958+ struct acl_subject_label *nested;
89959+ struct acl_object_label *globbed;
89960+
89961+ /* next two structures not used */
89962+
89963+ struct acl_object_label *prev;
89964+ struct acl_object_label *next;
89965+};
89966+
89967+struct acl_ip_label {
89968+ char *iface;
89969+ __u32 addr;
89970+ __u32 netmask;
89971+ __u16 low, high;
89972+ __u8 mode;
89973+ __u32 type;
89974+ __u32 proto[8];
89975+
89976+ /* next two structures not used */
89977+
89978+ struct acl_ip_label *prev;
89979+ struct acl_ip_label *next;
89980+};
89981+
89982+struct gr_arg {
89983+ struct user_acl_role_db role_db;
89984+ unsigned char pw[GR_PW_LEN];
89985+ unsigned char salt[GR_SALT_LEN];
89986+ unsigned char sum[GR_SHA_LEN];
89987+ unsigned char sp_role[GR_SPROLE_LEN];
89988+ struct sprole_pw *sprole_pws;
89989+ dev_t segv_device;
89990+ u64 segv_inode;
89991+ uid_t segv_uid;
89992+ __u16 num_sprole_pws;
89993+ __u16 mode;
89994+};
89995+
89996+struct gr_arg_wrapper {
89997+ struct gr_arg *arg;
89998+ __u32 version;
89999+ __u32 size;
90000+};
90001+
90002+struct subject_map {
90003+ struct acl_subject_label *user;
90004+ struct acl_subject_label *kernel;
90005+ struct subject_map *prev;
90006+ struct subject_map *next;
90007+};
90008+
90009+struct acl_subj_map_db {
90010+ struct subject_map **s_hash;
90011+ __u32 s_size;
90012+};
90013+
90014+struct gr_policy_state {
90015+ struct sprole_pw **acl_special_roles;
90016+ __u16 num_sprole_pws;
90017+ struct acl_role_label *kernel_role;
90018+ struct acl_role_label *role_list;
90019+ struct acl_role_label *default_role;
90020+ struct acl_role_db acl_role_set;
90021+ struct acl_subj_map_db subj_map_set;
90022+ struct name_db name_set;
90023+ struct inodev_db inodev_set;
90024+};
90025+
90026+struct gr_alloc_state {
90027+ unsigned long alloc_stack_next;
90028+ unsigned long alloc_stack_size;
90029+ void **alloc_stack;
90030+};
90031+
90032+struct gr_reload_state {
90033+ struct gr_policy_state oldpolicy;
90034+ struct gr_alloc_state oldalloc;
90035+ struct gr_policy_state newpolicy;
90036+ struct gr_alloc_state newalloc;
90037+ struct gr_policy_state *oldpolicy_ptr;
90038+ struct gr_alloc_state *oldalloc_ptr;
90039+ unsigned char oldmode;
90040+};
90041+
90042+/* End Data Structures Section */
90043+
90044+/* Hash functions generated by empirical testing by Brad Spengler
90045+ Makes good use of the low bits of the inode. Generally 0-1 times
90046+ in loop for successful match. 0-3 for unsuccessful match.
90047+ Shift/add algorithm with modulus of table size and an XOR*/
90048+
90049+static __inline__ unsigned int
90050+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
90051+{
90052+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
90053+}
90054+
90055+ static __inline__ unsigned int
90056+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
90057+{
90058+ return ((const unsigned long)userp % sz);
90059+}
90060+
90061+static __inline__ unsigned int
90062+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
90063+{
90064+ unsigned int rem;
90065+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
90066+ return rem;
90067+}
90068+
90069+static __inline__ unsigned int
90070+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
90071+{
90072+ return full_name_hash((const unsigned char *)name, len) % sz;
90073+}
90074+
90075+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
90076+ subj = NULL; \
90077+ iter = 0; \
90078+ while (iter < role->subj_hash_size) { \
90079+ if (subj == NULL) \
90080+ subj = role->subj_hash[iter]; \
90081+ if (subj == NULL) { \
90082+ iter++; \
90083+ continue; \
90084+ }
90085+
90086+#define FOR_EACH_SUBJECT_END(subj,iter) \
90087+ subj = subj->next; \
90088+ if (subj == NULL) \
90089+ iter++; \
90090+ }
90091+
90092+
90093+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
90094+ subj = role->hash->first; \
90095+ while (subj != NULL) {
90096+
90097+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
90098+ subj = subj->next; \
90099+ }
90100+
90101+#endif
90102+
90103diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
90104new file mode 100644
90105index 0000000..af64092
90106--- /dev/null
90107+++ b/include/linux/gracl_compat.h
90108@@ -0,0 +1,156 @@
90109+#ifndef GR_ACL_COMPAT_H
90110+#define GR_ACL_COMPAT_H
90111+
90112+#include <linux/resource.h>
90113+#include <asm/resource.h>
90114+
90115+struct sprole_pw_compat {
90116+ compat_uptr_t rolename;
90117+ unsigned char salt[GR_SALT_LEN];
90118+ unsigned char sum[GR_SHA_LEN];
90119+};
90120+
90121+struct gr_hash_struct_compat {
90122+ compat_uptr_t table;
90123+ compat_uptr_t nametable;
90124+ compat_uptr_t first;
90125+ __u32 table_size;
90126+ __u32 used_size;
90127+ int type;
90128+};
90129+
90130+struct acl_subject_label_compat {
90131+ compat_uptr_t filename;
90132+ compat_u64 inode;
90133+ __u32 device;
90134+ __u32 mode;
90135+ kernel_cap_t cap_mask;
90136+ kernel_cap_t cap_lower;
90137+ kernel_cap_t cap_invert_audit;
90138+
90139+ struct compat_rlimit res[GR_NLIMITS];
90140+ __u32 resmask;
90141+
90142+ __u8 user_trans_type;
90143+ __u8 group_trans_type;
90144+ compat_uptr_t user_transitions;
90145+ compat_uptr_t group_transitions;
90146+ __u16 user_trans_num;
90147+ __u16 group_trans_num;
90148+
90149+ __u32 sock_families[2];
90150+ __u32 ip_proto[8];
90151+ __u32 ip_type;
90152+ compat_uptr_t ips;
90153+ __u32 ip_num;
90154+ __u32 inaddr_any_override;
90155+
90156+ __u32 crashes;
90157+ compat_ulong_t expires;
90158+
90159+ compat_uptr_t parent_subject;
90160+ compat_uptr_t hash;
90161+ compat_uptr_t prev;
90162+ compat_uptr_t next;
90163+
90164+ compat_uptr_t obj_hash;
90165+ __u32 obj_hash_size;
90166+ __u16 pax_flags;
90167+};
90168+
90169+struct role_allowed_ip_compat {
90170+ __u32 addr;
90171+ __u32 netmask;
90172+
90173+ compat_uptr_t prev;
90174+ compat_uptr_t next;
90175+};
90176+
90177+struct role_transition_compat {
90178+ compat_uptr_t rolename;
90179+
90180+ compat_uptr_t prev;
90181+ compat_uptr_t next;
90182+};
90183+
90184+struct acl_role_label_compat {
90185+ compat_uptr_t rolename;
90186+ uid_t uidgid;
90187+ __u16 roletype;
90188+
90189+ __u16 auth_attempts;
90190+ compat_ulong_t expires;
90191+
90192+ compat_uptr_t root_label;
90193+ compat_uptr_t hash;
90194+
90195+ compat_uptr_t prev;
90196+ compat_uptr_t next;
90197+
90198+ compat_uptr_t transitions;
90199+ compat_uptr_t allowed_ips;
90200+ compat_uptr_t domain_children;
90201+ __u16 domain_child_num;
90202+
90203+ umode_t umask;
90204+
90205+ compat_uptr_t subj_hash;
90206+ __u32 subj_hash_size;
90207+};
90208+
90209+struct user_acl_role_db_compat {
90210+ compat_uptr_t r_table;
90211+ __u32 num_pointers;
90212+ __u32 num_roles;
90213+ __u32 num_domain_children;
90214+ __u32 num_subjects;
90215+ __u32 num_objects;
90216+};
90217+
90218+struct acl_object_label_compat {
90219+ compat_uptr_t filename;
90220+ compat_u64 inode;
90221+ __u32 device;
90222+ __u32 mode;
90223+
90224+ compat_uptr_t nested;
90225+ compat_uptr_t globbed;
90226+
90227+ compat_uptr_t prev;
90228+ compat_uptr_t next;
90229+};
90230+
90231+struct acl_ip_label_compat {
90232+ compat_uptr_t iface;
90233+ __u32 addr;
90234+ __u32 netmask;
90235+ __u16 low, high;
90236+ __u8 mode;
90237+ __u32 type;
90238+ __u32 proto[8];
90239+
90240+ compat_uptr_t prev;
90241+ compat_uptr_t next;
90242+};
90243+
90244+struct gr_arg_compat {
90245+ struct user_acl_role_db_compat role_db;
90246+ unsigned char pw[GR_PW_LEN];
90247+ unsigned char salt[GR_SALT_LEN];
90248+ unsigned char sum[GR_SHA_LEN];
90249+ unsigned char sp_role[GR_SPROLE_LEN];
90250+ compat_uptr_t sprole_pws;
90251+ __u32 segv_device;
90252+ compat_u64 segv_inode;
90253+ uid_t segv_uid;
90254+ __u16 num_sprole_pws;
90255+ __u16 mode;
90256+};
90257+
90258+struct gr_arg_wrapper_compat {
90259+ compat_uptr_t arg;
90260+ __u32 version;
90261+ __u32 size;
90262+};
90263+
90264+#endif
90265diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
90266new file mode 100644
90267index 0000000..323ecf2
90268--- /dev/null
90269+++ b/include/linux/gralloc.h
90270@@ -0,0 +1,9 @@
90271+#ifndef __GRALLOC_H
90272+#define __GRALLOC_H
90273+
90274+void acl_free_all(void);
90275+int acl_alloc_stack_init(unsigned long size);
90276+void *acl_alloc(unsigned long len);
90277+void *acl_alloc_num(unsigned long num, unsigned long len);
90278+
90279+#endif
90280diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
90281new file mode 100644
90282index 0000000..be66033
90283--- /dev/null
90284+++ b/include/linux/grdefs.h
90285@@ -0,0 +1,140 @@
90286+#ifndef GRDEFS_H
90287+#define GRDEFS_H
90288+
90289+/* Begin grsecurity status declarations */
90290+
90291+enum {
90292+ GR_READY = 0x01,
90293+ GR_STATUS_INIT = 0x00 // disabled state
90294+};
90295+
90296+/* Begin ACL declarations */
90297+
90298+/* Role flags */
90299+
90300+enum {
90301+ GR_ROLE_USER = 0x0001,
90302+ GR_ROLE_GROUP = 0x0002,
90303+ GR_ROLE_DEFAULT = 0x0004,
90304+ GR_ROLE_SPECIAL = 0x0008,
90305+ GR_ROLE_AUTH = 0x0010,
90306+ GR_ROLE_NOPW = 0x0020,
90307+ GR_ROLE_GOD = 0x0040,
90308+ GR_ROLE_LEARN = 0x0080,
90309+ GR_ROLE_TPE = 0x0100,
90310+ GR_ROLE_DOMAIN = 0x0200,
90311+ GR_ROLE_PAM = 0x0400,
90312+ GR_ROLE_PERSIST = 0x0800
90313+};
90314+
90315+/* ACL Subject and Object mode flags */
90316+enum {
90317+ GR_DELETED = 0x80000000
90318+};
90319+
90320+/* ACL Object-only mode flags */
90321+enum {
90322+ GR_READ = 0x00000001,
90323+ GR_APPEND = 0x00000002,
90324+ GR_WRITE = 0x00000004,
90325+ GR_EXEC = 0x00000008,
90326+ GR_FIND = 0x00000010,
90327+ GR_INHERIT = 0x00000020,
90328+ GR_SETID = 0x00000040,
90329+ GR_CREATE = 0x00000080,
90330+ GR_DELETE = 0x00000100,
90331+ GR_LINK = 0x00000200,
90332+ GR_AUDIT_READ = 0x00000400,
90333+ GR_AUDIT_APPEND = 0x00000800,
90334+ GR_AUDIT_WRITE = 0x00001000,
90335+ GR_AUDIT_EXEC = 0x00002000,
90336+ GR_AUDIT_FIND = 0x00004000,
90337+ GR_AUDIT_INHERIT= 0x00008000,
90338+ GR_AUDIT_SETID = 0x00010000,
90339+ GR_AUDIT_CREATE = 0x00020000,
90340+ GR_AUDIT_DELETE = 0x00040000,
90341+ GR_AUDIT_LINK = 0x00080000,
90342+ GR_PTRACERD = 0x00100000,
90343+ GR_NOPTRACE = 0x00200000,
90344+ GR_SUPPRESS = 0x00400000,
90345+ GR_NOLEARN = 0x00800000,
90346+ GR_INIT_TRANSFER= 0x01000000
90347+};
90348+
90349+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
90350+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
90351+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
90352+
90353+/* ACL subject-only mode flags */
90354+enum {
90355+ GR_KILL = 0x00000001,
90356+ GR_VIEW = 0x00000002,
90357+ GR_PROTECTED = 0x00000004,
90358+ GR_LEARN = 0x00000008,
90359+ GR_OVERRIDE = 0x00000010,
90360+ /* just a placeholder, this mode is only used in userspace */
90361+ GR_DUMMY = 0x00000020,
90362+ GR_PROTSHM = 0x00000040,
90363+ GR_KILLPROC = 0x00000080,
90364+ GR_KILLIPPROC = 0x00000100,
90365+ /* just a placeholder, this mode is only used in userspace */
90366+ GR_NOTROJAN = 0x00000200,
90367+ GR_PROTPROCFD = 0x00000400,
90368+ GR_PROCACCT = 0x00000800,
90369+ GR_RELAXPTRACE = 0x00001000,
90370+ //GR_NESTED = 0x00002000,
90371+ GR_INHERITLEARN = 0x00004000,
90372+ GR_PROCFIND = 0x00008000,
90373+ GR_POVERRIDE = 0x00010000,
90374+ GR_KERNELAUTH = 0x00020000,
90375+ GR_ATSECURE = 0x00040000,
90376+ GR_SHMEXEC = 0x00080000
90377+};
90378+
90379+enum {
90380+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
90381+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
90382+ GR_PAX_ENABLE_MPROTECT = 0x0004,
90383+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
90384+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
90385+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
90386+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
90387+ GR_PAX_DISABLE_MPROTECT = 0x0400,
90388+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
90389+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
90390+};
90391+
90392+enum {
90393+ GR_ID_USER = 0x01,
90394+ GR_ID_GROUP = 0x02,
90395+};
90396+
90397+enum {
90398+ GR_ID_ALLOW = 0x01,
90399+ GR_ID_DENY = 0x02,
90400+};
90401+
90402+#define GR_CRASH_RES 31
90403+#define GR_UIDTABLE_MAX 500
90404+
90405+/* begin resource learning section */
90406+enum {
90407+ GR_RLIM_CPU_BUMP = 60,
90408+ GR_RLIM_FSIZE_BUMP = 50000,
90409+ GR_RLIM_DATA_BUMP = 10000,
90410+ GR_RLIM_STACK_BUMP = 1000,
90411+ GR_RLIM_CORE_BUMP = 10000,
90412+ GR_RLIM_RSS_BUMP = 500000,
90413+ GR_RLIM_NPROC_BUMP = 1,
90414+ GR_RLIM_NOFILE_BUMP = 5,
90415+ GR_RLIM_MEMLOCK_BUMP = 50000,
90416+ GR_RLIM_AS_BUMP = 500000,
90417+ GR_RLIM_LOCKS_BUMP = 2,
90418+ GR_RLIM_SIGPENDING_BUMP = 5,
90419+ GR_RLIM_MSGQUEUE_BUMP = 10000,
90420+ GR_RLIM_NICE_BUMP = 1,
90421+ GR_RLIM_RTPRIO_BUMP = 1,
90422+ GR_RLIM_RTTIME_BUMP = 1000000
90423+};
90424+
90425+#endif
90426diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
90427new file mode 100644
90428index 0000000..fb1de5d
90429--- /dev/null
90430+++ b/include/linux/grinternal.h
90431@@ -0,0 +1,230 @@
90432+#ifndef __GRINTERNAL_H
90433+#define __GRINTERNAL_H
90434+
90435+#ifdef CONFIG_GRKERNSEC
90436+
90437+#include <linux/fs.h>
90438+#include <linux/mnt_namespace.h>
90439+#include <linux/nsproxy.h>
90440+#include <linux/gracl.h>
90441+#include <linux/grdefs.h>
90442+#include <linux/grmsg.h>
90443+
90444+void gr_add_learn_entry(const char *fmt, ...)
90445+ __attribute__ ((format (printf, 1, 2)));
90446+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
90447+ const struct vfsmount *mnt);
90448+__u32 gr_check_create(const struct dentry *new_dentry,
90449+ const struct dentry *parent,
90450+ const struct vfsmount *mnt, const __u32 mode);
90451+int gr_check_protected_task(const struct task_struct *task);
90452+__u32 to_gr_audit(const __u32 reqmode);
90453+int gr_set_acls(const int type);
90454+int gr_acl_is_enabled(void);
90455+char gr_roletype_to_char(void);
90456+
90457+void gr_handle_alertkill(struct task_struct *task);
90458+char *gr_to_filename(const struct dentry *dentry,
90459+ const struct vfsmount *mnt);
90460+char *gr_to_filename1(const struct dentry *dentry,
90461+ const struct vfsmount *mnt);
90462+char *gr_to_filename2(const struct dentry *dentry,
90463+ const struct vfsmount *mnt);
90464+char *gr_to_filename3(const struct dentry *dentry,
90465+ const struct vfsmount *mnt);
90466+
90467+extern int grsec_enable_ptrace_readexec;
90468+extern int grsec_enable_harden_ptrace;
90469+extern int grsec_enable_link;
90470+extern int grsec_enable_fifo;
90471+extern int grsec_enable_execve;
90472+extern int grsec_enable_shm;
90473+extern int grsec_enable_execlog;
90474+extern int grsec_enable_signal;
90475+extern int grsec_enable_audit_ptrace;
90476+extern int grsec_enable_forkfail;
90477+extern int grsec_enable_time;
90478+extern int grsec_enable_rofs;
90479+extern int grsec_deny_new_usb;
90480+extern int grsec_enable_chroot_shmat;
90481+extern int grsec_enable_chroot_mount;
90482+extern int grsec_enable_chroot_double;
90483+extern int grsec_enable_chroot_pivot;
90484+extern int grsec_enable_chroot_chdir;
90485+extern int grsec_enable_chroot_chmod;
90486+extern int grsec_enable_chroot_mknod;
90487+extern int grsec_enable_chroot_fchdir;
90488+extern int grsec_enable_chroot_nice;
90489+extern int grsec_enable_chroot_execlog;
90490+extern int grsec_enable_chroot_caps;
90491+extern int grsec_enable_chroot_rename;
90492+extern int grsec_enable_chroot_sysctl;
90493+extern int grsec_enable_chroot_unix;
90494+extern int grsec_enable_symlinkown;
90495+extern kgid_t grsec_symlinkown_gid;
90496+extern int grsec_enable_tpe;
90497+extern kgid_t grsec_tpe_gid;
90498+extern int grsec_enable_tpe_all;
90499+extern int grsec_enable_tpe_invert;
90500+extern int grsec_enable_socket_all;
90501+extern kgid_t grsec_socket_all_gid;
90502+extern int grsec_enable_socket_client;
90503+extern kgid_t grsec_socket_client_gid;
90504+extern int grsec_enable_socket_server;
90505+extern kgid_t grsec_socket_server_gid;
90506+extern kgid_t grsec_audit_gid;
90507+extern int grsec_enable_group;
90508+extern int grsec_enable_log_rwxmaps;
90509+extern int grsec_enable_mount;
90510+extern int grsec_enable_chdir;
90511+extern int grsec_resource_logging;
90512+extern int grsec_enable_blackhole;
90513+extern int grsec_lastack_retries;
90514+extern int grsec_enable_brute;
90515+extern int grsec_enable_harden_ipc;
90516+extern int grsec_lock;
90517+
90518+extern spinlock_t grsec_alert_lock;
90519+extern unsigned long grsec_alert_wtime;
90520+extern unsigned long grsec_alert_fyet;
90521+
90522+extern spinlock_t grsec_audit_lock;
90523+
90524+extern rwlock_t grsec_exec_file_lock;
90525+
90526+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
90527+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
90528+ (tsk)->exec_file->f_path.mnt) : "/")
90529+
90530+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
90531+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
90532+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
90533+
90534+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
90535+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
90536+ (tsk)->exec_file->f_path.mnt) : "/")
90537+
90538+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
90539+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
90540+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
90541+
90542+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
90543+
90544+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
90545+
90546+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
90547+{
90548+ if (file1 && file2) {
90549+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
90550+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
90551+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
90552+ return true;
90553+ }
90554+
90555+ return false;
90556+}
90557+
90558+#define GR_CHROOT_CAPS {{ \
90559+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
90560+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
90561+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
90562+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
90563+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
90564+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
90565+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
90566+
90567+#define security_learn(normal_msg,args...) \
90568+({ \
90569+ read_lock(&grsec_exec_file_lock); \
90570+ gr_add_learn_entry(normal_msg "\n", ## args); \
90571+ read_unlock(&grsec_exec_file_lock); \
90572+})
90573+
90574+enum {
90575+ GR_DO_AUDIT,
90576+ GR_DONT_AUDIT,
90577+ /* used for non-audit messages that we shouldn't kill the task on */
90578+ GR_DONT_AUDIT_GOOD
90579+};
90580+
90581+enum {
90582+ GR_TTYSNIFF,
90583+ GR_RBAC,
90584+ GR_RBAC_STR,
90585+ GR_STR_RBAC,
90586+ GR_RBAC_MODE2,
90587+ GR_RBAC_MODE3,
90588+ GR_FILENAME,
90589+ GR_SYSCTL_HIDDEN,
90590+ GR_NOARGS,
90591+ GR_ONE_INT,
90592+ GR_ONE_INT_TWO_STR,
90593+ GR_ONE_STR,
90594+ GR_STR_INT,
90595+ GR_TWO_STR_INT,
90596+ GR_TWO_INT,
90597+ GR_TWO_U64,
90598+ GR_THREE_INT,
90599+ GR_FIVE_INT_TWO_STR,
90600+ GR_TWO_STR,
90601+ GR_THREE_STR,
90602+ GR_FOUR_STR,
90603+ GR_STR_FILENAME,
90604+ GR_FILENAME_STR,
90605+ GR_FILENAME_TWO_INT,
90606+ GR_FILENAME_TWO_INT_STR,
90607+ GR_TEXTREL,
90608+ GR_PTRACE,
90609+ GR_RESOURCE,
90610+ GR_CAP,
90611+ GR_SIG,
90612+ GR_SIG2,
90613+ GR_CRASH1,
90614+ GR_CRASH2,
90615+ GR_PSACCT,
90616+ GR_RWXMAP,
90617+ GR_RWXMAPVMA
90618+};
90619+
90620+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
90621+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
90622+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
90623+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
90624+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
90625+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
90626+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
90627+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
90628+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
90629+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
90630+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
90631+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
90632+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
90633+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
90634+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
90635+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
90636+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
90637+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
90638+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
90639+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
90640+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
90641+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
90642+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
90643+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
90644+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
90645+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
90646+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
90647+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
90648+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
90649+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
90650+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
90651+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
90652+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
90653+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
90654+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
90655+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
90656+
90657+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
90658+
90659+#endif
90660+
90661+#endif
90662diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
90663new file mode 100644
90664index 0000000..26ef560
90665--- /dev/null
90666+++ b/include/linux/grmsg.h
90667@@ -0,0 +1,118 @@
90668+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
90669+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
90670+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
90671+#define GR_STOPMOD_MSG "denied modification of module state by "
90672+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
90673+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
90674+#define GR_IOPERM_MSG "denied use of ioperm() by "
90675+#define GR_IOPL_MSG "denied use of iopl() by "
90676+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
90677+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
90678+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
90679+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
90680+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
90681+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
90682+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
90683+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
90684+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
90685+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
90686+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
90687+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
90688+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
90689+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
90690+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
90691+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
90692+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
90693+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
90694+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
90695+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
90696+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
90697+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
90698+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
90699+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
90700+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
90701+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
90702+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
90703+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
90704+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
90705+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
90706+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
90707+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
90708+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
90709+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
90710+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
90711+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
90712+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
90713+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
90714+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
90715+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
90716+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
90717+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
90718+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
90719+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
90720+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
90721+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
90722+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
90723+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
90724+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
90725+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
90726+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
90727+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
90728+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
90729+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
90730+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
90731+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
90732+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
90733+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
90734+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
90735+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
90736+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
90737+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
90738+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
90739+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
90740+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
90741+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
90742+#define GR_FAILFORK_MSG "failed fork with errno %s by "
90743+#define GR_NICE_CHROOT_MSG "denied priority change by "
90744+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
90745+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
90746+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
90747+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
90748+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
90749+#define GR_TIME_MSG "time set by "
90750+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
90751+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
90752+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
90753+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
90754+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
90755+#define GR_BIND_MSG "denied bind() by "
90756+#define GR_CONNECT_MSG "denied connect() by "
90757+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
90758+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
90759+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
90760+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
90761+#define GR_CAP_ACL_MSG "use of %s denied for "
90762+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
90763+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
90764+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
90765+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
90766+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
90767+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
90768+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
90769+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
90770+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
90771+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
90772+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
90773+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
90774+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
90775+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
90776+#define GR_VM86_MSG "denied use of vm86 by "
90777+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
90778+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
90779+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
90780+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
90781+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
90782+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
90783+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
90784+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
90785+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
90786diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
90787new file mode 100644
90788index 0000000..63c1850
90789--- /dev/null
90790+++ b/include/linux/grsecurity.h
90791@@ -0,0 +1,250 @@
90792+#ifndef GR_SECURITY_H
90793+#define GR_SECURITY_H
90794+#include <linux/fs.h>
90795+#include <linux/fs_struct.h>
90796+#include <linux/binfmts.h>
90797+#include <linux/gracl.h>
90798+
90799+/* notify of brain-dead configs */
90800+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90801+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
90802+#endif
90803+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90804+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
90805+#endif
90806+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
90807+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
90808+#endif
90809+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
90810+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
90811+#endif
90812+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
90813+#error "CONFIG_PAX enabled, but no PaX options are enabled."
90814+#endif
90815+
90816+int gr_handle_new_usb(void);
90817+
90818+void gr_handle_brute_attach(int dumpable);
90819+void gr_handle_brute_check(void);
90820+void gr_handle_kernel_exploit(void);
90821+
90822+char gr_roletype_to_char(void);
90823+
90824+int gr_proc_is_restricted(void);
90825+
90826+int gr_acl_enable_at_secure(void);
90827+
90828+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
90829+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
90830+
90831+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
90832+
90833+void gr_del_task_from_ip_table(struct task_struct *p);
90834+
90835+int gr_pid_is_chrooted(struct task_struct *p);
90836+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
90837+int gr_handle_chroot_nice(void);
90838+int gr_handle_chroot_sysctl(const int op);
90839+int gr_handle_chroot_setpriority(struct task_struct *p,
90840+ const int niceval);
90841+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
90842+int gr_chroot_fhandle(void);
90843+int gr_handle_chroot_chroot(const struct dentry *dentry,
90844+ const struct vfsmount *mnt);
90845+void gr_handle_chroot_chdir(const struct path *path);
90846+int gr_handle_chroot_chmod(const struct dentry *dentry,
90847+ const struct vfsmount *mnt, const int mode);
90848+int gr_handle_chroot_mknod(const struct dentry *dentry,
90849+ const struct vfsmount *mnt, const int mode);
90850+int gr_handle_chroot_mount(const struct dentry *dentry,
90851+ const struct vfsmount *mnt,
90852+ const char *dev_name);
90853+int gr_handle_chroot_pivot(void);
90854+int gr_handle_chroot_unix(const pid_t pid);
90855+
90856+int gr_handle_rawio(const struct inode *inode);
90857+
90858+void gr_handle_ioperm(void);
90859+void gr_handle_iopl(void);
90860+void gr_handle_msr_write(void);
90861+
90862+umode_t gr_acl_umask(void);
90863+
90864+int gr_tpe_allow(const struct file *file);
90865+
90866+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
90867+void gr_clear_chroot_entries(struct task_struct *task);
90868+
90869+void gr_log_forkfail(const int retval);
90870+void gr_log_timechange(void);
90871+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
90872+void gr_log_chdir(const struct dentry *dentry,
90873+ const struct vfsmount *mnt);
90874+void gr_log_chroot_exec(const struct dentry *dentry,
90875+ const struct vfsmount *mnt);
90876+void gr_log_remount(const char *devname, const int retval);
90877+void gr_log_unmount(const char *devname, const int retval);
90878+void gr_log_mount(const char *from, struct path *to, const int retval);
90879+void gr_log_textrel(struct vm_area_struct *vma);
90880+void gr_log_ptgnustack(struct file *file);
90881+void gr_log_rwxmmap(struct file *file);
90882+void gr_log_rwxmprotect(struct vm_area_struct *vma);
90883+
90884+int gr_handle_follow_link(const struct inode *parent,
90885+ const struct inode *inode,
90886+ const struct dentry *dentry,
90887+ const struct vfsmount *mnt);
90888+int gr_handle_fifo(const struct dentry *dentry,
90889+ const struct vfsmount *mnt,
90890+ const struct dentry *dir, const int flag,
90891+ const int acc_mode);
90892+int gr_handle_hardlink(const struct dentry *dentry,
90893+ const struct vfsmount *mnt,
90894+ struct inode *inode,
90895+ const int mode, const struct filename *to);
90896+
90897+int gr_is_capable(const int cap);
90898+int gr_is_capable_nolog(const int cap);
90899+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
90900+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
90901+
90902+void gr_copy_label(struct task_struct *tsk);
90903+void gr_handle_crash(struct task_struct *task, const int sig);
90904+int gr_handle_signal(const struct task_struct *p, const int sig);
90905+int gr_check_crash_uid(const kuid_t uid);
90906+int gr_check_protected_task(const struct task_struct *task);
90907+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
90908+int gr_acl_handle_mmap(const struct file *file,
90909+ const unsigned long prot);
90910+int gr_acl_handle_mprotect(const struct file *file,
90911+ const unsigned long prot);
90912+int gr_check_hidden_task(const struct task_struct *tsk);
90913+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
90914+ const struct vfsmount *mnt);
90915+__u32 gr_acl_handle_utime(const struct dentry *dentry,
90916+ const struct vfsmount *mnt);
90917+__u32 gr_acl_handle_access(const struct dentry *dentry,
90918+ const struct vfsmount *mnt, const int fmode);
90919+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
90920+ const struct vfsmount *mnt, umode_t *mode);
90921+__u32 gr_acl_handle_chown(const struct dentry *dentry,
90922+ const struct vfsmount *mnt);
90923+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
90924+ const struct vfsmount *mnt);
90925+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
90926+ const struct vfsmount *mnt);
90927+int gr_handle_ptrace(struct task_struct *task, const long request);
90928+int gr_handle_proc_ptrace(struct task_struct *task);
90929+__u32 gr_acl_handle_execve(const struct dentry *dentry,
90930+ const struct vfsmount *mnt);
90931+int gr_check_crash_exec(const struct file *filp);
90932+int gr_acl_is_enabled(void);
90933+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
90934+ const kgid_t gid);
90935+int gr_set_proc_label(const struct dentry *dentry,
90936+ const struct vfsmount *mnt,
90937+ const int unsafe_flags);
90938+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
90939+ const struct vfsmount *mnt);
90940+__u32 gr_acl_handle_open(const struct dentry *dentry,
90941+ const struct vfsmount *mnt, int acc_mode);
90942+__u32 gr_acl_handle_creat(const struct dentry *dentry,
90943+ const struct dentry *p_dentry,
90944+ const struct vfsmount *p_mnt,
90945+ int open_flags, int acc_mode, const int imode);
90946+void gr_handle_create(const struct dentry *dentry,
90947+ const struct vfsmount *mnt);
90948+void gr_handle_proc_create(const struct dentry *dentry,
90949+ const struct inode *inode);
90950+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
90951+ const struct dentry *parent_dentry,
90952+ const struct vfsmount *parent_mnt,
90953+ const int mode);
90954+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
90955+ const struct dentry *parent_dentry,
90956+ const struct vfsmount *parent_mnt);
90957+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
90958+ const struct vfsmount *mnt);
90959+void gr_handle_delete(const u64 ino, const dev_t dev);
90960+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
90961+ const struct vfsmount *mnt);
90962+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
90963+ const struct dentry *parent_dentry,
90964+ const struct vfsmount *parent_mnt,
90965+ const struct filename *from);
90966+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
90967+ const struct dentry *parent_dentry,
90968+ const struct vfsmount *parent_mnt,
90969+ const struct dentry *old_dentry,
90970+ const struct vfsmount *old_mnt, const struct filename *to);
90971+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
90972+int gr_acl_handle_rename(struct dentry *new_dentry,
90973+ struct dentry *parent_dentry,
90974+ const struct vfsmount *parent_mnt,
90975+ struct dentry *old_dentry,
90976+ struct inode *old_parent_inode,
90977+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
90978+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
90979+ struct dentry *old_dentry,
90980+ struct dentry *new_dentry,
90981+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
90982+__u32 gr_check_link(const struct dentry *new_dentry,
90983+ const struct dentry *parent_dentry,
90984+ const struct vfsmount *parent_mnt,
90985+ const struct dentry *old_dentry,
90986+ const struct vfsmount *old_mnt);
90987+int gr_acl_handle_filldir(const struct file *file, const char *name,
90988+ const unsigned int namelen, const u64 ino);
90989+
90990+__u32 gr_acl_handle_unix(const struct dentry *dentry,
90991+ const struct vfsmount *mnt);
90992+void gr_acl_handle_exit(void);
90993+void gr_acl_handle_psacct(struct task_struct *task, const long code);
90994+int gr_acl_handle_procpidmem(const struct task_struct *task);
90995+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
90996+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
90997+void gr_audit_ptrace(struct task_struct *task);
90998+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
90999+u64 gr_get_ino_from_dentry(struct dentry *dentry);
91000+void gr_put_exec_file(struct task_struct *task);
91001+
91002+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
91003+
91004+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
91005+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
91006+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
91007+ struct dentry *newdentry, struct vfsmount *newmnt);
91008+
91009+#ifdef CONFIG_GRKERNSEC_RESLOG
91010+extern void gr_log_resource(const struct task_struct *task, const int res,
91011+ const unsigned long wanted, const int gt);
91012+#else
91013+static inline void gr_log_resource(const struct task_struct *task, const int res,
91014+ const unsigned long wanted, const int gt)
91015+{
91016+}
91017+#endif
91018+
91019+#ifdef CONFIG_GRKERNSEC
91020+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
91021+void gr_handle_vm86(void);
91022+void gr_handle_mem_readwrite(u64 from, u64 to);
91023+
91024+void gr_log_badprocpid(const char *entry);
91025+
91026+extern int grsec_enable_dmesg;
91027+extern int grsec_disable_privio;
91028+
91029+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
91030+extern kgid_t grsec_proc_gid;
91031+#endif
91032+
91033+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
91034+extern int grsec_enable_chroot_findtask;
91035+#endif
91036+#ifdef CONFIG_GRKERNSEC_SETXID
91037+extern int grsec_enable_setxid;
91038+#endif
91039+#endif
91040+
91041+#endif
91042diff --git a/include/linux/grsock.h b/include/linux/grsock.h
91043new file mode 100644
91044index 0000000..e7ffaaf
91045--- /dev/null
91046+++ b/include/linux/grsock.h
91047@@ -0,0 +1,19 @@
91048+#ifndef __GRSOCK_H
91049+#define __GRSOCK_H
91050+
91051+extern void gr_attach_curr_ip(const struct sock *sk);
91052+extern int gr_handle_sock_all(const int family, const int type,
91053+ const int protocol);
91054+extern int gr_handle_sock_server(const struct sockaddr *sck);
91055+extern int gr_handle_sock_server_other(const struct sock *sck);
91056+extern int gr_handle_sock_client(const struct sockaddr *sck);
91057+extern int gr_search_connect(struct socket * sock,
91058+ struct sockaddr_in * addr);
91059+extern int gr_search_bind(struct socket * sock,
91060+ struct sockaddr_in * addr);
91061+extern int gr_search_listen(struct socket * sock);
91062+extern int gr_search_accept(struct socket * sock);
91063+extern int gr_search_socket(const int domain, const int type,
91064+ const int protocol);
91065+
91066+#endif
91067diff --git a/include/linux/highmem.h b/include/linux/highmem.h
91068index 9286a46..373f27f 100644
91069--- a/include/linux/highmem.h
91070+++ b/include/linux/highmem.h
91071@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
91072 kunmap_atomic(kaddr);
91073 }
91074
91075+static inline void sanitize_highpage(struct page *page)
91076+{
91077+ void *kaddr;
91078+ unsigned long flags;
91079+
91080+ local_irq_save(flags);
91081+ kaddr = kmap_atomic(page);
91082+ clear_page(kaddr);
91083+ kunmap_atomic(kaddr);
91084+ local_irq_restore(flags);
91085+}
91086+
91087 static inline void zero_user_segments(struct page *page,
91088 unsigned start1, unsigned end1,
91089 unsigned start2, unsigned end2)
91090diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
91091index 1c7b89a..7dda400 100644
91092--- a/include/linux/hwmon-sysfs.h
91093+++ b/include/linux/hwmon-sysfs.h
91094@@ -25,7 +25,8 @@
91095 struct sensor_device_attribute{
91096 struct device_attribute dev_attr;
91097 int index;
91098-};
91099+} __do_const;
91100+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
91101 #define to_sensor_dev_attr(_dev_attr) \
91102 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
91103
91104@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
91105 struct device_attribute dev_attr;
91106 u8 index;
91107 u8 nr;
91108-};
91109+} __do_const;
91110+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
91111 #define to_sensor_dev_attr_2(_dev_attr) \
91112 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
91113
91114diff --git a/include/linux/i2c.h b/include/linux/i2c.h
91115index f17da50..2f8b203 100644
91116--- a/include/linux/i2c.h
91117+++ b/include/linux/i2c.h
91118@@ -409,6 +409,7 @@ struct i2c_algorithm {
91119 int (*unreg_slave)(struct i2c_client *client);
91120 #endif
91121 };
91122+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
91123
91124 /**
91125 * struct i2c_bus_recovery_info - I2C bus recovery information
91126diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
91127index aff7ad8..3942bbd 100644
91128--- a/include/linux/if_pppox.h
91129+++ b/include/linux/if_pppox.h
91130@@ -76,7 +76,7 @@ struct pppox_proto {
91131 int (*ioctl)(struct socket *sock, unsigned int cmd,
91132 unsigned long arg);
91133 struct module *owner;
91134-};
91135+} __do_const;
91136
91137 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
91138 extern void unregister_pppox_proto(int proto_num);
91139diff --git a/include/linux/init.h b/include/linux/init.h
91140index 2df8e8d..3e1280d 100644
91141--- a/include/linux/init.h
91142+++ b/include/linux/init.h
91143@@ -37,9 +37,17 @@
91144 * section.
91145 */
91146
91147+#define add_init_latent_entropy __latent_entropy
91148+
91149+#ifdef CONFIG_MEMORY_HOTPLUG
91150+#define add_meminit_latent_entropy
91151+#else
91152+#define add_meminit_latent_entropy __latent_entropy
91153+#endif
91154+
91155 /* These are for everybody (although not all archs will actually
91156 discard it in modules) */
91157-#define __init __section(.init.text) __cold notrace
91158+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
91159 #define __initdata __section(.init.data)
91160 #define __initconst __constsection(.init.rodata)
91161 #define __exitdata __section(.exit.data)
91162@@ -100,7 +108,7 @@
91163 #define __cpuexitconst
91164
91165 /* Used for MEMORY_HOTPLUG */
91166-#define __meminit __section(.meminit.text) __cold notrace
91167+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
91168 #define __meminitdata __section(.meminit.data)
91169 #define __meminitconst __constsection(.meminit.rodata)
91170 #define __memexit __section(.memexit.text) __exitused __cold notrace
91171diff --git a/include/linux/init_task.h b/include/linux/init_task.h
91172index 696d223..6d6b39f 100644
91173--- a/include/linux/init_task.h
91174+++ b/include/linux/init_task.h
91175@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
91176
91177 #define INIT_TASK_COMM "swapper"
91178
91179+#ifdef CONFIG_X86
91180+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
91181+#else
91182+#define INIT_TASK_THREAD_INFO
91183+#endif
91184+
91185 #ifdef CONFIG_RT_MUTEXES
91186 # define INIT_RT_MUTEXES(tsk) \
91187 .pi_waiters = RB_ROOT, \
91188@@ -224,6 +230,7 @@ extern struct task_group root_task_group;
91189 RCU_POINTER_INITIALIZER(cred, &init_cred), \
91190 .comm = INIT_TASK_COMM, \
91191 .thread = INIT_THREAD, \
91192+ INIT_TASK_THREAD_INFO \
91193 .fs = &init_fs, \
91194 .files = &init_files, \
91195 .signal = &init_signals, \
91196diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
91197index 2e88580..f6a99a0 100644
91198--- a/include/linux/interrupt.h
91199+++ b/include/linux/interrupt.h
91200@@ -420,8 +420,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
91201
91202 struct softirq_action
91203 {
91204- void (*action)(struct softirq_action *);
91205-};
91206+ void (*action)(void);
91207+} __no_const;
91208
91209 asmlinkage void do_softirq(void);
91210 asmlinkage void __do_softirq(void);
91211@@ -435,7 +435,7 @@ static inline void do_softirq_own_stack(void)
91212 }
91213 #endif
91214
91215-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
91216+extern void open_softirq(int nr, void (*action)(void));
91217 extern void softirq_init(void);
91218 extern void __raise_softirq_irqoff(unsigned int nr);
91219
91220diff --git a/include/linux/iommu.h b/include/linux/iommu.h
91221index 38daa45..4de4317 100644
91222--- a/include/linux/iommu.h
91223+++ b/include/linux/iommu.h
91224@@ -147,7 +147,7 @@ struct iommu_ops {
91225
91226 unsigned long pgsize_bitmap;
91227 void *priv;
91228-};
91229+} __do_const;
91230
91231 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
91232 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
91233diff --git a/include/linux/ioport.h b/include/linux/ioport.h
91234index 2c525022..345b106 100644
91235--- a/include/linux/ioport.h
91236+++ b/include/linux/ioport.h
91237@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
91238 int adjust_resource(struct resource *res, resource_size_t start,
91239 resource_size_t size);
91240 resource_size_t resource_alignment(struct resource *res);
91241-static inline resource_size_t resource_size(const struct resource *res)
91242+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
91243 {
91244 return res->end - res->start + 1;
91245 }
91246diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
91247index 1eee6bc..9cf4912 100644
91248--- a/include/linux/ipc_namespace.h
91249+++ b/include/linux/ipc_namespace.h
91250@@ -60,7 +60,7 @@ struct ipc_namespace {
91251 struct user_namespace *user_ns;
91252
91253 struct ns_common ns;
91254-};
91255+} __randomize_layout;
91256
91257 extern struct ipc_namespace init_ipc_ns;
91258 extern atomic_t nr_ipc_ns;
91259diff --git a/include/linux/irq.h b/include/linux/irq.h
91260index d09ec7a..f373eb5 100644
91261--- a/include/linux/irq.h
91262+++ b/include/linux/irq.h
91263@@ -364,7 +364,8 @@ struct irq_chip {
91264 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
91265
91266 unsigned long flags;
91267-};
91268+} __do_const;
91269+typedef struct irq_chip __no_const irq_chip_no_const;
91270
91271 /*
91272 * irq_chip specific flags
91273diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
91274index 71d706d..817cdec 100644
91275--- a/include/linux/irqchip/arm-gic.h
91276+++ b/include/linux/irqchip/arm-gic.h
91277@@ -95,7 +95,7 @@
91278
91279 struct device_node;
91280
91281-extern struct irq_chip gic_arch_extn;
91282+extern irq_chip_no_const gic_arch_extn;
91283
91284 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
91285 u32 offset, struct device_node *);
91286diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
91287index dd1109f..4f4fdda 100644
91288--- a/include/linux/irqdesc.h
91289+++ b/include/linux/irqdesc.h
91290@@ -61,7 +61,7 @@ struct irq_desc {
91291 unsigned int irq_count; /* For detecting broken IRQs */
91292 unsigned long last_unhandled; /* Aging timer for unhandled count */
91293 unsigned int irqs_unhandled;
91294- atomic_t threads_handled;
91295+ atomic_unchecked_t threads_handled;
91296 int threads_handled_last;
91297 raw_spinlock_t lock;
91298 struct cpumask *percpu_enabled;
91299diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
91300index 676d730..8d3a1ad 100644
91301--- a/include/linux/irqdomain.h
91302+++ b/include/linux/irqdomain.h
91303@@ -40,6 +40,7 @@ struct device_node;
91304 struct irq_domain;
91305 struct of_device_id;
91306 struct irq_chip;
91307+typedef struct irq_chip __no_const irq_chip_no_const;
91308 struct irq_data;
91309
91310 /* Number of irqs reserved for a legacy isa controller */
91311diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
91312index c367cbd..c9b79e6 100644
91313--- a/include/linux/jiffies.h
91314+++ b/include/linux/jiffies.h
91315@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
91316 /*
91317 * Convert various time units to each other:
91318 */
91319-extern unsigned int jiffies_to_msecs(const unsigned long j);
91320-extern unsigned int jiffies_to_usecs(const unsigned long j);
91321+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
91322+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
91323
91324-static inline u64 jiffies_to_nsecs(const unsigned long j)
91325+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
91326 {
91327 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
91328 }
91329
91330-extern unsigned long msecs_to_jiffies(const unsigned int m);
91331-extern unsigned long usecs_to_jiffies(const unsigned int u);
91332+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
91333+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
91334 extern unsigned long timespec_to_jiffies(const struct timespec *value);
91335 extern void jiffies_to_timespec(const unsigned long jiffies,
91336- struct timespec *value);
91337-extern unsigned long timeval_to_jiffies(const struct timeval *value);
91338+ struct timespec *value) __intentional_overflow(-1);
91339+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
91340 extern void jiffies_to_timeval(const unsigned long jiffies,
91341 struct timeval *value);
91342
91343diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
91344index 6883e19..e854fcb 100644
91345--- a/include/linux/kallsyms.h
91346+++ b/include/linux/kallsyms.h
91347@@ -15,7 +15,8 @@
91348
91349 struct module;
91350
91351-#ifdef CONFIG_KALLSYMS
91352+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
91353+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
91354 /* Lookup the address for a symbol. Returns 0 if not found. */
91355 unsigned long kallsyms_lookup_name(const char *name);
91356
91357@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
91358 /* Stupid that this does nothing, but I didn't create this mess. */
91359 #define __print_symbol(fmt, addr)
91360 #endif /*CONFIG_KALLSYMS*/
91361+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
91362+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
91363+extern unsigned long kallsyms_lookup_name(const char *name);
91364+extern void __print_symbol(const char *fmt, unsigned long address);
91365+extern int sprint_backtrace(char *buffer, unsigned long address);
91366+extern int sprint_symbol(char *buffer, unsigned long address);
91367+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
91368+const char *kallsyms_lookup(unsigned long addr,
91369+ unsigned long *symbolsize,
91370+ unsigned long *offset,
91371+ char **modname, char *namebuf);
91372+extern int kallsyms_lookup_size_offset(unsigned long addr,
91373+ unsigned long *symbolsize,
91374+ unsigned long *offset);
91375+#endif
91376
91377 /* This macro allows us to keep printk typechecking */
91378 static __printf(1, 2)
91379diff --git a/include/linux/kernel.h b/include/linux/kernel.h
91380index d6d630d..feea1f5 100644
91381--- a/include/linux/kernel.h
91382+++ b/include/linux/kernel.h
91383@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
91384 /* Obsolete, do not use. Use kstrto<foo> instead */
91385
91386 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
91387-extern long simple_strtol(const char *,char **,unsigned int);
91388+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
91389 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
91390 extern long long simple_strtoll(const char *,char **,unsigned int);
91391
91392diff --git a/include/linux/key-type.h b/include/linux/key-type.h
91393index ff9f1d3..6712be5 100644
91394--- a/include/linux/key-type.h
91395+++ b/include/linux/key-type.h
91396@@ -152,7 +152,7 @@ struct key_type {
91397 /* internal fields */
91398 struct list_head link; /* link in types list */
91399 struct lock_class_key lock_class; /* key->sem lock class */
91400-};
91401+} __do_const;
91402
91403 extern struct key_type key_type_keyring;
91404
91405diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
91406index e465bb1..19f605fd 100644
91407--- a/include/linux/kgdb.h
91408+++ b/include/linux/kgdb.h
91409@@ -52,7 +52,7 @@ extern int kgdb_connected;
91410 extern int kgdb_io_module_registered;
91411
91412 extern atomic_t kgdb_setting_breakpoint;
91413-extern atomic_t kgdb_cpu_doing_single_step;
91414+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
91415
91416 extern struct task_struct *kgdb_usethread;
91417 extern struct task_struct *kgdb_contthread;
91418@@ -254,7 +254,7 @@ struct kgdb_arch {
91419 void (*correct_hw_break)(void);
91420
91421 void (*enable_nmi)(bool on);
91422-};
91423+} __do_const;
91424
91425 /**
91426 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
91427@@ -279,7 +279,7 @@ struct kgdb_io {
91428 void (*pre_exception) (void);
91429 void (*post_exception) (void);
91430 int is_console;
91431-};
91432+} __do_const;
91433
91434 extern struct kgdb_arch arch_kgdb_ops;
91435
91436diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
91437index e705467..a92471d 100644
91438--- a/include/linux/kmemleak.h
91439+++ b/include/linux/kmemleak.h
91440@@ -27,7 +27,7 @@
91441
91442 extern void kmemleak_init(void) __ref;
91443 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
91444- gfp_t gfp) __ref;
91445+ gfp_t gfp) __ref __size_overflow(2);
91446 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
91447 extern void kmemleak_free(const void *ptr) __ref;
91448 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
91449@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
91450 static inline void kmemleak_init(void)
91451 {
91452 }
91453-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
91454+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
91455 gfp_t gfp)
91456 {
91457 }
91458diff --git a/include/linux/kmod.h b/include/linux/kmod.h
91459index 0555cc6..40116ce 100644
91460--- a/include/linux/kmod.h
91461+++ b/include/linux/kmod.h
91462@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
91463 * usually useless though. */
91464 extern __printf(2, 3)
91465 int __request_module(bool wait, const char *name, ...);
91466+extern __printf(3, 4)
91467+int ___request_module(bool wait, char *param_name, const char *name, ...);
91468 #define request_module(mod...) __request_module(true, mod)
91469 #define request_module_nowait(mod...) __request_module(false, mod)
91470 #define try_then_request_module(x, mod...) \
91471@@ -57,6 +59,9 @@ struct subprocess_info {
91472 struct work_struct work;
91473 struct completion *complete;
91474 char *path;
91475+#ifdef CONFIG_GRKERNSEC
91476+ char *origpath;
91477+#endif
91478 char **argv;
91479 char **envp;
91480 int wait;
91481diff --git a/include/linux/kobject.h b/include/linux/kobject.h
91482index 2d61b90..a1d0a13 100644
91483--- a/include/linux/kobject.h
91484+++ b/include/linux/kobject.h
91485@@ -118,7 +118,7 @@ struct kobj_type {
91486 struct attribute **default_attrs;
91487 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
91488 const void *(*namespace)(struct kobject *kobj);
91489-};
91490+} __do_const;
91491
91492 struct kobj_uevent_env {
91493 char *argv[3];
91494@@ -142,6 +142,7 @@ struct kobj_attribute {
91495 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
91496 const char *buf, size_t count);
91497 };
91498+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
91499
91500 extern const struct sysfs_ops kobj_sysfs_ops;
91501
91502@@ -169,7 +170,7 @@ struct kset {
91503 spinlock_t list_lock;
91504 struct kobject kobj;
91505 const struct kset_uevent_ops *uevent_ops;
91506-};
91507+} __randomize_layout;
91508
91509 extern void kset_init(struct kset *kset);
91510 extern int __must_check kset_register(struct kset *kset);
91511diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
91512index df32d25..fb52e27 100644
91513--- a/include/linux/kobject_ns.h
91514+++ b/include/linux/kobject_ns.h
91515@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
91516 const void *(*netlink_ns)(struct sock *sk);
91517 const void *(*initial_ns)(void);
91518 void (*drop_ns)(void *);
91519-};
91520+} __do_const;
91521
91522 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
91523 int kobj_ns_type_registered(enum kobj_ns_type type);
91524diff --git a/include/linux/kref.h b/include/linux/kref.h
91525index 484604d..0f6c5b6 100644
91526--- a/include/linux/kref.h
91527+++ b/include/linux/kref.h
91528@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
91529 static inline int kref_sub(struct kref *kref, unsigned int count,
91530 void (*release)(struct kref *kref))
91531 {
91532- WARN_ON(release == NULL);
91533+ BUG_ON(release == NULL);
91534
91535 if (atomic_sub_and_test((int) count, &kref->refcount)) {
91536 release(kref);
91537diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
91538index d12b210..d91fd76 100644
91539--- a/include/linux/kvm_host.h
91540+++ b/include/linux/kvm_host.h
91541@@ -455,7 +455,7 @@ static inline void kvm_irqfd_exit(void)
91542 {
91543 }
91544 #endif
91545-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
91546+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
91547 struct module *module);
91548 void kvm_exit(void);
91549
91550@@ -633,7 +633,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
91551 struct kvm_guest_debug *dbg);
91552 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
91553
91554-int kvm_arch_init(void *opaque);
91555+int kvm_arch_init(const void *opaque);
91556 void kvm_arch_exit(void);
91557
91558 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
91559diff --git a/include/linux/libata.h b/include/linux/libata.h
91560index 6b08cc1..248c5e9 100644
91561--- a/include/linux/libata.h
91562+++ b/include/linux/libata.h
91563@@ -980,7 +980,7 @@ struct ata_port_operations {
91564 * fields must be pointers.
91565 */
91566 const struct ata_port_operations *inherits;
91567-};
91568+} __do_const;
91569
91570 struct ata_port_info {
91571 unsigned long flags;
91572diff --git a/include/linux/linkage.h b/include/linux/linkage.h
91573index a6a42dd..6c5ebce 100644
91574--- a/include/linux/linkage.h
91575+++ b/include/linux/linkage.h
91576@@ -36,6 +36,7 @@
91577 #endif
91578
91579 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
91580+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
91581 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
91582
91583 /*
91584diff --git a/include/linux/list.h b/include/linux/list.h
91585index feb773c..98f3075 100644
91586--- a/include/linux/list.h
91587+++ b/include/linux/list.h
91588@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
91589 extern void list_del(struct list_head *entry);
91590 #endif
91591
91592+extern void __pax_list_add(struct list_head *new,
91593+ struct list_head *prev,
91594+ struct list_head *next);
91595+static inline void pax_list_add(struct list_head *new, struct list_head *head)
91596+{
91597+ __pax_list_add(new, head, head->next);
91598+}
91599+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
91600+{
91601+ __pax_list_add(new, head->prev, head);
91602+}
91603+extern void pax_list_del(struct list_head *entry);
91604+
91605 /**
91606 * list_replace - replace old entry by new one
91607 * @old : the element to be replaced
91608@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
91609 INIT_LIST_HEAD(entry);
91610 }
91611
91612+extern void pax_list_del_init(struct list_head *entry);
91613+
91614 /**
91615 * list_move - delete from one list and add as another's head
91616 * @list: the entry to move
91617diff --git a/include/linux/lockref.h b/include/linux/lockref.h
91618index b10b122..d37b3de 100644
91619--- a/include/linux/lockref.h
91620+++ b/include/linux/lockref.h
91621@@ -28,7 +28,7 @@ struct lockref {
91622 #endif
91623 struct {
91624 spinlock_t lock;
91625- int count;
91626+ atomic_t count;
91627 };
91628 };
91629 };
91630@@ -43,9 +43,29 @@ extern void lockref_mark_dead(struct lockref *);
91631 extern int lockref_get_not_dead(struct lockref *);
91632
91633 /* Must be called under spinlock for reliable results */
91634-static inline int __lockref_is_dead(const struct lockref *l)
91635+static inline int __lockref_is_dead(const struct lockref *lockref)
91636 {
91637- return ((int)l->count < 0);
91638+ return atomic_read(&lockref->count) < 0;
91639+}
91640+
91641+static inline int __lockref_read(const struct lockref *lockref)
91642+{
91643+ return atomic_read(&lockref->count);
91644+}
91645+
91646+static inline void __lockref_set(struct lockref *lockref, int count)
91647+{
91648+ atomic_set(&lockref->count, count);
91649+}
91650+
91651+static inline void __lockref_inc(struct lockref *lockref)
91652+{
91653+ atomic_inc(&lockref->count);
91654+}
91655+
91656+static inline void __lockref_dec(struct lockref *lockref)
91657+{
91658+ atomic_dec(&lockref->count);
91659 }
91660
91661 #endif /* __LINUX_LOCKREF_H */
91662diff --git a/include/linux/math64.h b/include/linux/math64.h
91663index c45c089..298841c 100644
91664--- a/include/linux/math64.h
91665+++ b/include/linux/math64.h
91666@@ -15,7 +15,7 @@
91667 * This is commonly provided by 32bit archs to provide an optimized 64bit
91668 * divide.
91669 */
91670-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91671+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91672 {
91673 *remainder = dividend % divisor;
91674 return dividend / divisor;
91675@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
91676 /**
91677 * div64_u64 - unsigned 64bit divide with 64bit divisor
91678 */
91679-static inline u64 div64_u64(u64 dividend, u64 divisor)
91680+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
91681 {
91682 return dividend / divisor;
91683 }
91684@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
91685 #define div64_ul(x, y) div_u64((x), (y))
91686
91687 #ifndef div_u64_rem
91688-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91689+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91690 {
91691 *remainder = do_div(dividend, divisor);
91692 return dividend;
91693@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
91694 #endif
91695
91696 #ifndef div64_u64
91697-extern u64 div64_u64(u64 dividend, u64 divisor);
91698+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
91699 #endif
91700
91701 #ifndef div64_s64
91702@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
91703 * divide.
91704 */
91705 #ifndef div_u64
91706-static inline u64 div_u64(u64 dividend, u32 divisor)
91707+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
91708 {
91709 u32 remainder;
91710 return div_u64_rem(dividend, divisor, &remainder);
91711diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
91712index 3d385c8..deacb6a 100644
91713--- a/include/linux/mempolicy.h
91714+++ b/include/linux/mempolicy.h
91715@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
91716 }
91717
91718 #define vma_policy(vma) ((vma)->vm_policy)
91719+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
91720+{
91721+ vma->vm_policy = pol;
91722+}
91723
91724 static inline void mpol_get(struct mempolicy *pol)
91725 {
91726@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
91727 }
91728
91729 #define vma_policy(vma) NULL
91730+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
91731+{
91732+}
91733
91734 static inline int
91735 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
91736diff --git a/include/linux/mm.h b/include/linux/mm.h
91737index 47a9392..ef645bc 100644
91738--- a/include/linux/mm.h
91739+++ b/include/linux/mm.h
91740@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
91741
91742 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
91743 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
91744+
91745+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
91746+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
91747+#endif
91748+
91749 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
91750 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
91751 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
91752@@ -254,8 +259,8 @@ struct vm_operations_struct {
91753 /* called by access_process_vm when get_user_pages() fails, typically
91754 * for use by special VMAs that can switch between memory and hardware
91755 */
91756- int (*access)(struct vm_area_struct *vma, unsigned long addr,
91757- void *buf, int len, int write);
91758+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
91759+ void *buf, size_t len, int write);
91760
91761 /* Called by the /proc/PID/maps code to ask the vma whether it
91762 * has a special name. Returning non-NULL will also cause this
91763@@ -293,6 +298,7 @@ struct vm_operations_struct {
91764 struct page *(*find_special_page)(struct vm_area_struct *vma,
91765 unsigned long addr);
91766 };
91767+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
91768
91769 struct mmu_gather;
91770 struct inode;
91771@@ -1213,8 +1219,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
91772 unsigned long *pfn);
91773 int follow_phys(struct vm_area_struct *vma, unsigned long address,
91774 unsigned int flags, unsigned long *prot, resource_size_t *phys);
91775-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91776- void *buf, int len, int write);
91777+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91778+ void *buf, size_t len, int write);
91779
91780 static inline void unmap_shared_mapping_range(struct address_space *mapping,
91781 loff_t const holebegin, loff_t const holelen)
91782@@ -1254,9 +1260,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
91783 }
91784 #endif
91785
91786-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
91787-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91788- void *buf, int len, int write);
91789+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
91790+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
91791+ void *buf, size_t len, int write);
91792
91793 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
91794 unsigned long start, unsigned long nr_pages,
91795@@ -1299,34 +1305,6 @@ int set_page_dirty_lock(struct page *page);
91796 int clear_page_dirty_for_io(struct page *page);
91797 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
91798
91799-/* Is the vma a continuation of the stack vma above it? */
91800-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
91801-{
91802- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
91803-}
91804-
91805-static inline int stack_guard_page_start(struct vm_area_struct *vma,
91806- unsigned long addr)
91807-{
91808- return (vma->vm_flags & VM_GROWSDOWN) &&
91809- (vma->vm_start == addr) &&
91810- !vma_growsdown(vma->vm_prev, addr);
91811-}
91812-
91813-/* Is the vma a continuation of the stack vma below it? */
91814-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
91815-{
91816- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
91817-}
91818-
91819-static inline int stack_guard_page_end(struct vm_area_struct *vma,
91820- unsigned long addr)
91821-{
91822- return (vma->vm_flags & VM_GROWSUP) &&
91823- (vma->vm_end == addr) &&
91824- !vma_growsup(vma->vm_next, addr);
91825-}
91826-
91827 extern struct task_struct *task_of_stack(struct task_struct *task,
91828 struct vm_area_struct *vma, bool in_group);
91829
91830@@ -1449,8 +1427,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
91831 {
91832 return 0;
91833 }
91834+
91835+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
91836+ unsigned long address)
91837+{
91838+ return 0;
91839+}
91840 #else
91841 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
91842+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
91843 #endif
91844
91845 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
91846@@ -1460,6 +1445,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
91847 return 0;
91848 }
91849
91850+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
91851+ unsigned long address)
91852+{
91853+ return 0;
91854+}
91855+
91856 static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
91857
91858 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
91859@@ -1472,6 +1463,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
91860
91861 #else
91862 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
91863+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
91864
91865 static inline void mm_nr_pmds_init(struct mm_struct *mm)
91866 {
91867@@ -1509,11 +1501,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
91868 NULL: pud_offset(pgd, address);
91869 }
91870
91871+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
91872+{
91873+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
91874+ NULL: pud_offset(pgd, address);
91875+}
91876+
91877 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
91878 {
91879 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
91880 NULL: pmd_offset(pud, address);
91881 }
91882+
91883+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
91884+{
91885+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
91886+ NULL: pmd_offset(pud, address);
91887+}
91888 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
91889
91890 #if USE_SPLIT_PTE_PTLOCKS
91891@@ -1890,12 +1894,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
91892 bool *need_rmap_locks);
91893 extern void exit_mmap(struct mm_struct *);
91894
91895+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
91896+extern void gr_learn_resource(const struct task_struct *task, const int res,
91897+ const unsigned long wanted, const int gt);
91898+#else
91899+static inline void gr_learn_resource(const struct task_struct *task, const int res,
91900+ const unsigned long wanted, const int gt)
91901+{
91902+}
91903+#endif
91904+
91905 static inline int check_data_rlimit(unsigned long rlim,
91906 unsigned long new,
91907 unsigned long start,
91908 unsigned long end_data,
91909 unsigned long start_data)
91910 {
91911+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
91912 if (rlim < RLIM_INFINITY) {
91913 if (((new - start) + (end_data - start_data)) > rlim)
91914 return -ENOSPC;
91915@@ -1920,7 +1935,7 @@ extern int install_special_mapping(struct mm_struct *mm,
91916 unsigned long addr, unsigned long len,
91917 unsigned long flags, struct page **pages);
91918
91919-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
91920+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
91921
91922 extern unsigned long mmap_region(struct file *file, unsigned long addr,
91923 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
91924@@ -1928,6 +1943,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91925 unsigned long len, unsigned long prot, unsigned long flags,
91926 unsigned long pgoff, unsigned long *populate);
91927 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
91928+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
91929
91930 #ifdef CONFIG_MMU
91931 extern int __mm_populate(unsigned long addr, unsigned long len,
91932@@ -1956,10 +1972,11 @@ struct vm_unmapped_area_info {
91933 unsigned long high_limit;
91934 unsigned long align_mask;
91935 unsigned long align_offset;
91936+ unsigned long threadstack_offset;
91937 };
91938
91939-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
91940-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
91941+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
91942+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
91943
91944 /*
91945 * Search for an unmapped address range.
91946@@ -1971,7 +1988,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
91947 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
91948 */
91949 static inline unsigned long
91950-vm_unmapped_area(struct vm_unmapped_area_info *info)
91951+vm_unmapped_area(const struct vm_unmapped_area_info *info)
91952 {
91953 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
91954 return unmapped_area(info);
91955@@ -2033,6 +2050,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
91956 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
91957 struct vm_area_struct **pprev);
91958
91959+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
91960+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
91961+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
91962+
91963 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
91964 NULL if none. Assume start_addr < end_addr. */
91965 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
91966@@ -2062,10 +2083,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
91967 }
91968
91969 #ifdef CONFIG_MMU
91970-pgprot_t vm_get_page_prot(unsigned long vm_flags);
91971+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
91972 void vma_set_page_prot(struct vm_area_struct *vma);
91973 #else
91974-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
91975+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
91976 {
91977 return __pgprot(0);
91978 }
91979@@ -2127,6 +2148,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
91980 static inline void vm_stat_account(struct mm_struct *mm,
91981 unsigned long flags, struct file *file, long pages)
91982 {
91983+
91984+#ifdef CONFIG_PAX_RANDMMAP
91985+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
91986+#endif
91987+
91988 mm->total_vm += pages;
91989 }
91990 #endif /* CONFIG_PROC_FS */
91991@@ -2229,7 +2255,7 @@ extern int unpoison_memory(unsigned long pfn);
91992 extern int sysctl_memory_failure_early_kill;
91993 extern int sysctl_memory_failure_recovery;
91994 extern void shake_page(struct page *p, int access);
91995-extern atomic_long_t num_poisoned_pages;
91996+extern atomic_long_unchecked_t num_poisoned_pages;
91997 extern int soft_offline_page(struct page *page, int flags);
91998
91999 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
92000@@ -2280,5 +2306,11 @@ void __init setup_nr_node_ids(void);
92001 static inline void setup_nr_node_ids(void) {}
92002 #endif
92003
92004+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
92005+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
92006+#else
92007+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
92008+#endif
92009+
92010 #endif /* __KERNEL__ */
92011 #endif /* _LINUX_MM_H */
92012diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
92013index 199a03a..7328440 100644
92014--- a/include/linux/mm_types.h
92015+++ b/include/linux/mm_types.h
92016@@ -313,7 +313,9 @@ struct vm_area_struct {
92017 #ifdef CONFIG_NUMA
92018 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
92019 #endif
92020-};
92021+
92022+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
92023+} __randomize_layout;
92024
92025 struct core_thread {
92026 struct task_struct *task;
92027@@ -464,7 +466,25 @@ struct mm_struct {
92028 /* address of the bounds directory */
92029 void __user *bd_addr;
92030 #endif
92031-};
92032+
92033+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
92034+ unsigned long pax_flags;
92035+#endif
92036+
92037+#ifdef CONFIG_PAX_DLRESOLVE
92038+ unsigned long call_dl_resolve;
92039+#endif
92040+
92041+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
92042+ unsigned long call_syscall;
92043+#endif
92044+
92045+#ifdef CONFIG_PAX_ASLR
92046+ unsigned long delta_mmap; /* randomized offset */
92047+ unsigned long delta_stack; /* randomized offset */
92048+#endif
92049+
92050+} __randomize_layout;
92051
92052 static inline void mm_init_cpumask(struct mm_struct *mm)
92053 {
92054diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
92055index 160448f..7b332b7 100644
92056--- a/include/linux/mmc/core.h
92057+++ b/include/linux/mmc/core.h
92058@@ -79,7 +79,7 @@ struct mmc_command {
92059 #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
92060
92061 unsigned int retries; /* max number of retries */
92062- unsigned int error; /* command error */
92063+ int error; /* command error */
92064
92065 /*
92066 * Standard errno values are used for errors, but some have specific
92067diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
92068index c5d5278..f0b68c8 100644
92069--- a/include/linux/mmiotrace.h
92070+++ b/include/linux/mmiotrace.h
92071@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
92072 /* Called from ioremap.c */
92073 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
92074 void __iomem *addr);
92075-extern void mmiotrace_iounmap(volatile void __iomem *addr);
92076+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
92077
92078 /* For anyone to insert markers. Remember trailing newline. */
92079 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
92080@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
92081 {
92082 }
92083
92084-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
92085+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
92086 {
92087 }
92088
92089diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
92090index 2782df4..abe756e 100644
92091--- a/include/linux/mmzone.h
92092+++ b/include/linux/mmzone.h
92093@@ -526,7 +526,7 @@ struct zone {
92094
92095 ZONE_PADDING(_pad3_)
92096 /* Zone statistics */
92097- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
92098+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
92099 } ____cacheline_internodealigned_in_smp;
92100
92101 enum zone_flags {
92102diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
92103index e530533..c9620c7 100644
92104--- a/include/linux/mod_devicetable.h
92105+++ b/include/linux/mod_devicetable.h
92106@@ -139,7 +139,7 @@ struct usb_device_id {
92107 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
92108 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
92109
92110-#define HID_ANY_ID (~0)
92111+#define HID_ANY_ID (~0U)
92112 #define HID_BUS_ANY 0xffff
92113 #define HID_GROUP_ANY 0x0000
92114
92115@@ -470,7 +470,7 @@ struct dmi_system_id {
92116 const char *ident;
92117 struct dmi_strmatch matches[4];
92118 void *driver_data;
92119-};
92120+} __do_const;
92121 /*
92122 * struct dmi_device_id appears during expansion of
92123 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
92124diff --git a/include/linux/module.h b/include/linux/module.h
92125index b03485b..a26974f 100644
92126--- a/include/linux/module.h
92127+++ b/include/linux/module.h
92128@@ -17,9 +17,11 @@
92129 #include <linux/moduleparam.h>
92130 #include <linux/jump_label.h>
92131 #include <linux/export.h>
92132+#include <linux/fs.h>
92133
92134 #include <linux/percpu.h>
92135 #include <asm/module.h>
92136+#include <asm/pgtable.h>
92137
92138 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
92139 #define MODULE_SIG_STRING "~Module signature appended~\n"
92140@@ -42,7 +44,7 @@ struct module_kobject {
92141 struct kobject *drivers_dir;
92142 struct module_param_attrs *mp;
92143 struct completion *kobj_completion;
92144-};
92145+} __randomize_layout;
92146
92147 struct module_attribute {
92148 struct attribute attr;
92149@@ -54,12 +56,13 @@ struct module_attribute {
92150 int (*test)(struct module *);
92151 void (*free)(struct module *);
92152 };
92153+typedef struct module_attribute __no_const module_attribute_no_const;
92154
92155 struct module_version_attribute {
92156 struct module_attribute mattr;
92157 const char *module_name;
92158 const char *version;
92159-} __attribute__ ((__aligned__(sizeof(void *))));
92160+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
92161
92162 extern ssize_t __modver_version_show(struct module_attribute *,
92163 struct module_kobject *, char *);
92164@@ -221,7 +224,7 @@ struct module {
92165
92166 /* Sysfs stuff. */
92167 struct module_kobject mkobj;
92168- struct module_attribute *modinfo_attrs;
92169+ module_attribute_no_const *modinfo_attrs;
92170 const char *version;
92171 const char *srcversion;
92172 struct kobject *holders_dir;
92173@@ -270,19 +273,16 @@ struct module {
92174 int (*init)(void);
92175
92176 /* If this is non-NULL, vfree after init() returns */
92177- void *module_init;
92178+ void *module_init_rx, *module_init_rw;
92179
92180 /* Here is the actual code + data, vfree'd on unload. */
92181- void *module_core;
92182+ void *module_core_rx, *module_core_rw;
92183
92184 /* Here are the sizes of the init and core sections */
92185- unsigned int init_size, core_size;
92186+ unsigned int init_size_rw, core_size_rw;
92187
92188 /* The size of the executable code in each section. */
92189- unsigned int init_text_size, core_text_size;
92190-
92191- /* Size of RO sections of the module (text+rodata) */
92192- unsigned int init_ro_size, core_ro_size;
92193+ unsigned int init_size_rx, core_size_rx;
92194
92195 /* Arch-specific module values */
92196 struct mod_arch_specific arch;
92197@@ -338,6 +338,10 @@ struct module {
92198 #ifdef CONFIG_EVENT_TRACING
92199 struct ftrace_event_call **trace_events;
92200 unsigned int num_trace_events;
92201+ struct file_operations trace_id;
92202+ struct file_operations trace_enable;
92203+ struct file_operations trace_format;
92204+ struct file_operations trace_filter;
92205 #endif
92206 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
92207 unsigned int num_ftrace_callsites;
92208@@ -365,7 +369,7 @@ struct module {
92209 ctor_fn_t *ctors;
92210 unsigned int num_ctors;
92211 #endif
92212-};
92213+} __randomize_layout;
92214 #ifndef MODULE_ARCH_INIT
92215 #define MODULE_ARCH_INIT {}
92216 #endif
92217@@ -386,18 +390,48 @@ bool is_module_address(unsigned long addr);
92218 bool is_module_percpu_address(unsigned long addr);
92219 bool is_module_text_address(unsigned long addr);
92220
92221+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
92222+{
92223+
92224+#ifdef CONFIG_PAX_KERNEXEC
92225+ if (ktla_ktva(addr) >= (unsigned long)start &&
92226+ ktla_ktva(addr) < (unsigned long)start + size)
92227+ return 1;
92228+#endif
92229+
92230+ return ((void *)addr >= start && (void *)addr < start + size);
92231+}
92232+
92233+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
92234+{
92235+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
92236+}
92237+
92238+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
92239+{
92240+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
92241+}
92242+
92243+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
92244+{
92245+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
92246+}
92247+
92248+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
92249+{
92250+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
92251+}
92252+
92253 static inline bool within_module_core(unsigned long addr,
92254 const struct module *mod)
92255 {
92256- return (unsigned long)mod->module_core <= addr &&
92257- addr < (unsigned long)mod->module_core + mod->core_size;
92258+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
92259 }
92260
92261 static inline bool within_module_init(unsigned long addr,
92262 const struct module *mod)
92263 {
92264- return (unsigned long)mod->module_init <= addr &&
92265- addr < (unsigned long)mod->module_init + mod->init_size;
92266+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
92267 }
92268
92269 static inline bool within_module(unsigned long addr, const struct module *mod)
92270diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
92271index 4d0cb9b..3169ac7 100644
92272--- a/include/linux/moduleloader.h
92273+++ b/include/linux/moduleloader.h
92274@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
92275 sections. Returns NULL on failure. */
92276 void *module_alloc(unsigned long size);
92277
92278+#ifdef CONFIG_PAX_KERNEXEC
92279+void *module_alloc_exec(unsigned long size);
92280+#else
92281+#define module_alloc_exec(x) module_alloc(x)
92282+#endif
92283+
92284 /* Free memory returned from module_alloc. */
92285 void module_memfree(void *module_region);
92286
92287+#ifdef CONFIG_PAX_KERNEXEC
92288+void module_memfree_exec(void *module_region);
92289+#else
92290+#define module_memfree_exec(x) module_memfree((x))
92291+#endif
92292+
92293 /*
92294 * Apply the given relocation to the (simplified) ELF. Return -error
92295 * or 0.
92296@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
92297 unsigned int relsec,
92298 struct module *me)
92299 {
92300+#ifdef CONFIG_MODULES
92301 printk(KERN_ERR "module %s: REL relocation unsupported\n",
92302 module_name(me));
92303+#endif
92304 return -ENOEXEC;
92305 }
92306 #endif
92307@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
92308 unsigned int relsec,
92309 struct module *me)
92310 {
92311+#ifdef CONFIG_MODULES
92312 printk(KERN_ERR "module %s: REL relocation unsupported\n",
92313 module_name(me));
92314+#endif
92315 return -ENOEXEC;
92316 }
92317 #endif
92318diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
92319index 1c9effa..1160bdd 100644
92320--- a/include/linux/moduleparam.h
92321+++ b/include/linux/moduleparam.h
92322@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
92323 * @len is usually just sizeof(string).
92324 */
92325 #define module_param_string(name, string, len, perm) \
92326- static const struct kparam_string __param_string_##name \
92327+ static const struct kparam_string __param_string_##name __used \
92328 = { len, string }; \
92329 __module_param_call(MODULE_PARAM_PREFIX, name, \
92330 &param_ops_string, \
92331@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
92332 */
92333 #define module_param_array_named(name, array, type, nump, perm) \
92334 param_check_##type(name, &(array)[0]); \
92335- static const struct kparam_array __param_arr_##name \
92336+ static const struct kparam_array __param_arr_##name __used \
92337 = { .max = ARRAY_SIZE(array), .num = nump, \
92338 .ops = &param_ops_##type, \
92339 .elemsize = sizeof(array[0]), .elem = array }; \
92340diff --git a/include/linux/mount.h b/include/linux/mount.h
92341index 564beee..653be6f 100644
92342--- a/include/linux/mount.h
92343+++ b/include/linux/mount.h
92344@@ -67,7 +67,7 @@ struct vfsmount {
92345 struct dentry *mnt_root; /* root of the mounted tree */
92346 struct super_block *mnt_sb; /* pointer to superblock */
92347 int mnt_flags;
92348-};
92349+} __randomize_layout;
92350
92351 struct file; /* forward dec */
92352 struct path;
92353diff --git a/include/linux/namei.h b/include/linux/namei.h
92354index c899077..b9a2010 100644
92355--- a/include/linux/namei.h
92356+++ b/include/linux/namei.h
92357@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
92358 extern void unlock_rename(struct dentry *, struct dentry *);
92359
92360 extern void nd_jump_link(struct nameidata *nd, struct path *path);
92361-extern void nd_set_link(struct nameidata *nd, char *path);
92362-extern char *nd_get_link(struct nameidata *nd);
92363+extern void nd_set_link(struct nameidata *nd, const char *path);
92364+extern const char *nd_get_link(const struct nameidata *nd);
92365
92366 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
92367 {
92368diff --git a/include/linux/net.h b/include/linux/net.h
92369index 17d8339..81656c0 100644
92370--- a/include/linux/net.h
92371+++ b/include/linux/net.h
92372@@ -192,7 +192,7 @@ struct net_proto_family {
92373 int (*create)(struct net *net, struct socket *sock,
92374 int protocol, int kern);
92375 struct module *owner;
92376-};
92377+} __do_const;
92378
92379 struct iovec;
92380 struct kvec;
92381diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
92382index 2787388..1dd8e88 100644
92383--- a/include/linux/netdevice.h
92384+++ b/include/linux/netdevice.h
92385@@ -1198,6 +1198,7 @@ struct net_device_ops {
92386 u8 state);
92387 #endif
92388 };
92389+typedef struct net_device_ops __no_const net_device_ops_no_const;
92390
92391 /**
92392 * enum net_device_priv_flags - &struct net_device priv_flags
92393@@ -1546,10 +1547,10 @@ struct net_device {
92394
92395 struct net_device_stats stats;
92396
92397- atomic_long_t rx_dropped;
92398- atomic_long_t tx_dropped;
92399+ atomic_long_unchecked_t rx_dropped;
92400+ atomic_long_unchecked_t tx_dropped;
92401
92402- atomic_t carrier_changes;
92403+ atomic_unchecked_t carrier_changes;
92404
92405 #ifdef CONFIG_WIRELESS_EXT
92406 const struct iw_handler_def * wireless_handlers;
92407diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
92408index 2517ece..0bbfcfb 100644
92409--- a/include/linux/netfilter.h
92410+++ b/include/linux/netfilter.h
92411@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
92412 #endif
92413 /* Use the module struct to lock set/get code in place */
92414 struct module *owner;
92415-};
92416+} __do_const;
92417
92418 /* Function to register/unregister hook points. */
92419 int nf_register_hook(struct nf_hook_ops *reg);
92420diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
92421index e955d47..04a5338 100644
92422--- a/include/linux/netfilter/nfnetlink.h
92423+++ b/include/linux/netfilter/nfnetlink.h
92424@@ -19,7 +19,7 @@ struct nfnl_callback {
92425 const struct nlattr * const cda[]);
92426 const struct nla_policy *policy; /* netlink attribute policy */
92427 const u_int16_t attr_count; /* number of nlattr's */
92428-};
92429+} __do_const;
92430
92431 struct nfnetlink_subsystem {
92432 const char *name;
92433diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
92434new file mode 100644
92435index 0000000..33f4af8
92436--- /dev/null
92437+++ b/include/linux/netfilter/xt_gradm.h
92438@@ -0,0 +1,9 @@
92439+#ifndef _LINUX_NETFILTER_XT_GRADM_H
92440+#define _LINUX_NETFILTER_XT_GRADM_H 1
92441+
92442+struct xt_gradm_mtinfo {
92443+ __u16 flags;
92444+ __u16 invflags;
92445+};
92446+
92447+#endif
92448diff --git a/include/linux/nls.h b/include/linux/nls.h
92449index 520681b..2b7fabb 100644
92450--- a/include/linux/nls.h
92451+++ b/include/linux/nls.h
92452@@ -31,7 +31,7 @@ struct nls_table {
92453 const unsigned char *charset2upper;
92454 struct module *owner;
92455 struct nls_table *next;
92456-};
92457+} __do_const;
92458
92459 /* this value hold the maximum octet of charset */
92460 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
92461@@ -46,7 +46,7 @@ enum utf16_endian {
92462 /* nls_base.c */
92463 extern int __register_nls(struct nls_table *, struct module *);
92464 extern int unregister_nls(struct nls_table *);
92465-extern struct nls_table *load_nls(char *);
92466+extern struct nls_table *load_nls(const char *);
92467 extern void unload_nls(struct nls_table *);
92468 extern struct nls_table *load_nls_default(void);
92469 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
92470diff --git a/include/linux/notifier.h b/include/linux/notifier.h
92471index d14a4c3..a078786 100644
92472--- a/include/linux/notifier.h
92473+++ b/include/linux/notifier.h
92474@@ -54,7 +54,8 @@ struct notifier_block {
92475 notifier_fn_t notifier_call;
92476 struct notifier_block __rcu *next;
92477 int priority;
92478-};
92479+} __do_const;
92480+typedef struct notifier_block __no_const notifier_block_no_const;
92481
92482 struct atomic_notifier_head {
92483 spinlock_t lock;
92484diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
92485index b2a0f15..4d7da32 100644
92486--- a/include/linux/oprofile.h
92487+++ b/include/linux/oprofile.h
92488@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
92489 int oprofilefs_create_ro_ulong(struct dentry * root,
92490 char const * name, ulong * val);
92491
92492-/** Create a file for read-only access to an atomic_t. */
92493+/** Create a file for read-only access to an atomic_unchecked_t. */
92494 int oprofilefs_create_ro_atomic(struct dentry * root,
92495- char const * name, atomic_t * val);
92496+ char const * name, atomic_unchecked_t * val);
92497
92498 /** create a directory */
92499 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
92500diff --git a/include/linux/padata.h b/include/linux/padata.h
92501index 4386946..f50c615 100644
92502--- a/include/linux/padata.h
92503+++ b/include/linux/padata.h
92504@@ -129,7 +129,7 @@ struct parallel_data {
92505 struct padata_serial_queue __percpu *squeue;
92506 atomic_t reorder_objects;
92507 atomic_t refcnt;
92508- atomic_t seq_nr;
92509+ atomic_unchecked_t seq_nr;
92510 struct padata_cpumask cpumask;
92511 spinlock_t lock ____cacheline_aligned;
92512 unsigned int processed;
92513diff --git a/include/linux/path.h b/include/linux/path.h
92514index d137218..be0c176 100644
92515--- a/include/linux/path.h
92516+++ b/include/linux/path.h
92517@@ -1,13 +1,15 @@
92518 #ifndef _LINUX_PATH_H
92519 #define _LINUX_PATH_H
92520
92521+#include <linux/compiler.h>
92522+
92523 struct dentry;
92524 struct vfsmount;
92525
92526 struct path {
92527 struct vfsmount *mnt;
92528 struct dentry *dentry;
92529-};
92530+} __randomize_layout;
92531
92532 extern void path_get(const struct path *);
92533 extern void path_put(const struct path *);
92534diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
92535index 8c78950..0d74ed9 100644
92536--- a/include/linux/pci_hotplug.h
92537+++ b/include/linux/pci_hotplug.h
92538@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
92539 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
92540 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
92541 int (*reset_slot) (struct hotplug_slot *slot, int probe);
92542-};
92543+} __do_const;
92544+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
92545
92546 /**
92547 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
92548diff --git a/include/linux/percpu.h b/include/linux/percpu.h
92549index caebf2a..4c3ae9d 100644
92550--- a/include/linux/percpu.h
92551+++ b/include/linux/percpu.h
92552@@ -34,7 +34,7 @@
92553 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
92554 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
92555 */
92556-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
92557+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
92558 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
92559
92560 /*
92561diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
92562index 2b62198..2b74233 100644
92563--- a/include/linux/perf_event.h
92564+++ b/include/linux/perf_event.h
92565@@ -343,8 +343,8 @@ struct perf_event {
92566
92567 enum perf_event_active_state state;
92568 unsigned int attach_state;
92569- local64_t count;
92570- atomic64_t child_count;
92571+ local64_t count; /* PaX: fix it one day */
92572+ atomic64_unchecked_t child_count;
92573
92574 /*
92575 * These are the total time in nanoseconds that the event
92576@@ -395,8 +395,8 @@ struct perf_event {
92577 * These accumulate total time (in nanoseconds) that children
92578 * events have been enabled and running, respectively.
92579 */
92580- atomic64_t child_total_time_enabled;
92581- atomic64_t child_total_time_running;
92582+ atomic64_unchecked_t child_total_time_enabled;
92583+ atomic64_unchecked_t child_total_time_running;
92584
92585 /*
92586 * Protect attach/detach and child_list:
92587@@ -752,7 +752,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
92588 entry->ip[entry->nr++] = ip;
92589 }
92590
92591-extern int sysctl_perf_event_paranoid;
92592+extern int sysctl_perf_event_legitimately_concerned;
92593 extern int sysctl_perf_event_mlock;
92594 extern int sysctl_perf_event_sample_rate;
92595 extern int sysctl_perf_cpu_time_max_percent;
92596@@ -767,19 +767,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
92597 loff_t *ppos);
92598
92599
92600+static inline bool perf_paranoid_any(void)
92601+{
92602+ return sysctl_perf_event_legitimately_concerned > 2;
92603+}
92604+
92605 static inline bool perf_paranoid_tracepoint_raw(void)
92606 {
92607- return sysctl_perf_event_paranoid > -1;
92608+ return sysctl_perf_event_legitimately_concerned > -1;
92609 }
92610
92611 static inline bool perf_paranoid_cpu(void)
92612 {
92613- return sysctl_perf_event_paranoid > 0;
92614+ return sysctl_perf_event_legitimately_concerned > 0;
92615 }
92616
92617 static inline bool perf_paranoid_kernel(void)
92618 {
92619- return sysctl_perf_event_paranoid > 1;
92620+ return sysctl_perf_event_legitimately_concerned > 1;
92621 }
92622
92623 extern void perf_event_init(void);
92624@@ -912,7 +917,7 @@ struct perf_pmu_events_attr {
92625 struct device_attribute attr;
92626 u64 id;
92627 const char *event_str;
92628-};
92629+} __do_const;
92630
92631 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
92632 char *page);
92633diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
92634index 918b117..7af374b7 100644
92635--- a/include/linux/pid_namespace.h
92636+++ b/include/linux/pid_namespace.h
92637@@ -45,7 +45,7 @@ struct pid_namespace {
92638 int hide_pid;
92639 int reboot; /* group exit code if this pidns was rebooted */
92640 struct ns_common ns;
92641-};
92642+} __randomize_layout;
92643
92644 extern struct pid_namespace init_pid_ns;
92645
92646diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
92647index eb8b8ac..62649e1 100644
92648--- a/include/linux/pipe_fs_i.h
92649+++ b/include/linux/pipe_fs_i.h
92650@@ -47,10 +47,10 @@ struct pipe_inode_info {
92651 struct mutex mutex;
92652 wait_queue_head_t wait;
92653 unsigned int nrbufs, curbuf, buffers;
92654- unsigned int readers;
92655- unsigned int writers;
92656- unsigned int files;
92657- unsigned int waiting_writers;
92658+ atomic_t readers;
92659+ atomic_t writers;
92660+ atomic_t files;
92661+ atomic_t waiting_writers;
92662 unsigned int r_counter;
92663 unsigned int w_counter;
92664 struct page *tmp_page;
92665diff --git a/include/linux/pm.h b/include/linux/pm.h
92666index e2f1be6..78a0506 100644
92667--- a/include/linux/pm.h
92668+++ b/include/linux/pm.h
92669@@ -608,6 +608,7 @@ struct dev_pm_domain {
92670 struct dev_pm_ops ops;
92671 void (*detach)(struct device *dev, bool power_off);
92672 };
92673+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
92674
92675 /*
92676 * The PM_EVENT_ messages are also used by drivers implementing the legacy
92677diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
92678index 080e778..cbdaef7 100644
92679--- a/include/linux/pm_domain.h
92680+++ b/include/linux/pm_domain.h
92681@@ -39,11 +39,11 @@ struct gpd_dev_ops {
92682 int (*save_state)(struct device *dev);
92683 int (*restore_state)(struct device *dev);
92684 bool (*active_wakeup)(struct device *dev);
92685-};
92686+} __no_const;
92687
92688 struct gpd_cpuidle_data {
92689 unsigned int saved_exit_latency;
92690- struct cpuidle_state *idle_state;
92691+ cpuidle_state_no_const *idle_state;
92692 };
92693
92694 struct generic_pm_domain {
92695diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
92696index 30e84d4..22278b4 100644
92697--- a/include/linux/pm_runtime.h
92698+++ b/include/linux/pm_runtime.h
92699@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
92700
92701 static inline void pm_runtime_mark_last_busy(struct device *dev)
92702 {
92703- ACCESS_ONCE(dev->power.last_busy) = jiffies;
92704+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
92705 }
92706
92707 static inline bool pm_runtime_is_irq_safe(struct device *dev)
92708diff --git a/include/linux/pnp.h b/include/linux/pnp.h
92709index 6512e9c..ec27fa2 100644
92710--- a/include/linux/pnp.h
92711+++ b/include/linux/pnp.h
92712@@ -298,7 +298,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
92713 struct pnp_fixup {
92714 char id[7];
92715 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
92716-};
92717+} __do_const;
92718
92719 /* config parameters */
92720 #define PNP_CONFIG_NORMAL 0x0001
92721diff --git a/include/linux/poison.h b/include/linux/poison.h
92722index 2110a81..13a11bb 100644
92723--- a/include/linux/poison.h
92724+++ b/include/linux/poison.h
92725@@ -19,8 +19,8 @@
92726 * under normal circumstances, used to verify that nobody uses
92727 * non-initialized list entries.
92728 */
92729-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
92730-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
92731+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
92732+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
92733
92734 /********** include/linux/timer.h **********/
92735 /*
92736diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
92737index d8b187c3..9a9257a 100644
92738--- a/include/linux/power/smartreflex.h
92739+++ b/include/linux/power/smartreflex.h
92740@@ -238,7 +238,7 @@ struct omap_sr_class_data {
92741 int (*notify)(struct omap_sr *sr, u32 status);
92742 u8 notify_flags;
92743 u8 class_type;
92744-};
92745+} __do_const;
92746
92747 /**
92748 * struct omap_sr_nvalue_table - Smartreflex n-target value info
92749diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
92750index 4ea1d37..80f4b33 100644
92751--- a/include/linux/ppp-comp.h
92752+++ b/include/linux/ppp-comp.h
92753@@ -84,7 +84,7 @@ struct compressor {
92754 struct module *owner;
92755 /* Extra skb space needed by the compressor algorithm */
92756 unsigned int comp_extra;
92757-};
92758+} __do_const;
92759
92760 /*
92761 * The return value from decompress routine is the length of the
92762diff --git a/include/linux/preempt.h b/include/linux/preempt.h
92763index de83b4e..c4b997d 100644
92764--- a/include/linux/preempt.h
92765+++ b/include/linux/preempt.h
92766@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
92767 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
92768 #endif
92769
92770+#define raw_preempt_count_add(val) __preempt_count_add(val)
92771+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
92772+
92773 #define __preempt_count_inc() __preempt_count_add(1)
92774 #define __preempt_count_dec() __preempt_count_sub(1)
92775
92776 #define preempt_count_inc() preempt_count_add(1)
92777+#define raw_preempt_count_inc() raw_preempt_count_add(1)
92778 #define preempt_count_dec() preempt_count_sub(1)
92779+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
92780
92781 #ifdef CONFIG_PREEMPT_COUNT
92782
92783@@ -41,6 +46,12 @@ do { \
92784 barrier(); \
92785 } while (0)
92786
92787+#define raw_preempt_disable() \
92788+do { \
92789+ raw_preempt_count_inc(); \
92790+ barrier(); \
92791+} while (0)
92792+
92793 #define sched_preempt_enable_no_resched() \
92794 do { \
92795 barrier(); \
92796@@ -49,6 +60,12 @@ do { \
92797
92798 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
92799
92800+#define raw_preempt_enable_no_resched() \
92801+do { \
92802+ barrier(); \
92803+ raw_preempt_count_dec(); \
92804+} while (0)
92805+
92806 #ifdef CONFIG_PREEMPT
92807 #define preempt_enable() \
92808 do { \
92809@@ -113,8 +130,10 @@ do { \
92810 * region.
92811 */
92812 #define preempt_disable() barrier()
92813+#define raw_preempt_disable() barrier()
92814 #define sched_preempt_enable_no_resched() barrier()
92815 #define preempt_enable_no_resched() barrier()
92816+#define raw_preempt_enable_no_resched() barrier()
92817 #define preempt_enable() barrier()
92818 #define preempt_check_resched() do { } while (0)
92819
92820@@ -128,11 +147,13 @@ do { \
92821 /*
92822 * Modules have no business playing preemption tricks.
92823 */
92824+#ifndef CONFIG_PAX_KERNEXEC
92825 #undef sched_preempt_enable_no_resched
92826 #undef preempt_enable_no_resched
92827 #undef preempt_enable_no_resched_notrace
92828 #undef preempt_check_resched
92829 #endif
92830+#endif
92831
92832 #define preempt_set_need_resched() \
92833 do { \
92834diff --git a/include/linux/printk.h b/include/linux/printk.h
92835index baa3f97..168cff1 100644
92836--- a/include/linux/printk.h
92837+++ b/include/linux/printk.h
92838@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
92839 #endif
92840
92841 typedef int(*printk_func_t)(const char *fmt, va_list args);
92842+extern int kptr_restrict;
92843
92844 #ifdef CONFIG_PRINTK
92845 asmlinkage __printf(5, 0)
92846@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
92847
92848 extern int printk_delay_msec;
92849 extern int dmesg_restrict;
92850-extern int kptr_restrict;
92851
92852 extern void wake_up_klogd(void);
92853
92854diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
92855index b97bf2e..f14c92d4 100644
92856--- a/include/linux/proc_fs.h
92857+++ b/include/linux/proc_fs.h
92858@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
92859 extern struct proc_dir_entry *proc_symlink(const char *,
92860 struct proc_dir_entry *, const char *);
92861 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
92862+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
92863 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
92864 struct proc_dir_entry *, void *);
92865+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
92866+ struct proc_dir_entry *, void *);
92867 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
92868 struct proc_dir_entry *);
92869
92870@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
92871 return proc_create_data(name, mode, parent, proc_fops, NULL);
92872 }
92873
92874+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
92875+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
92876+{
92877+#ifdef CONFIG_GRKERNSEC_PROC_USER
92878+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
92879+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92880+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
92881+#else
92882+ return proc_create_data(name, mode, parent, proc_fops, NULL);
92883+#endif
92884+}
92885+
92886+
92887 extern void proc_set_size(struct proc_dir_entry *, loff_t);
92888 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
92889 extern void *PDE_DATA(const struct inode *);
92890@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
92891 struct proc_dir_entry *parent,const char *dest) { return NULL;}
92892 static inline struct proc_dir_entry *proc_mkdir(const char *name,
92893 struct proc_dir_entry *parent) {return NULL;}
92894+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
92895+ struct proc_dir_entry *parent) { return NULL; }
92896 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
92897 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
92898+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
92899+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
92900 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
92901 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
92902 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
92903@@ -79,7 +99,7 @@ struct net;
92904 static inline struct proc_dir_entry *proc_net_mkdir(
92905 struct net *net, const char *name, struct proc_dir_entry *parent)
92906 {
92907- return proc_mkdir_data(name, 0, parent, net);
92908+ return proc_mkdir_data_restrict(name, 0, parent, net);
92909 }
92910
92911 #endif /* _LINUX_PROC_FS_H */
92912diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
92913index 42dfc61..8113a99 100644
92914--- a/include/linux/proc_ns.h
92915+++ b/include/linux/proc_ns.h
92916@@ -16,7 +16,7 @@ struct proc_ns_operations {
92917 struct ns_common *(*get)(struct task_struct *task);
92918 void (*put)(struct ns_common *ns);
92919 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
92920-};
92921+} __do_const __randomize_layout;
92922
92923 extern const struct proc_ns_operations netns_operations;
92924 extern const struct proc_ns_operations utsns_operations;
92925diff --git a/include/linux/quota.h b/include/linux/quota.h
92926index d534e8e..782e604 100644
92927--- a/include/linux/quota.h
92928+++ b/include/linux/quota.h
92929@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
92930
92931 extern bool qid_eq(struct kqid left, struct kqid right);
92932 extern bool qid_lt(struct kqid left, struct kqid right);
92933-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
92934+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
92935 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
92936 extern bool qid_valid(struct kqid qid);
92937
92938diff --git a/include/linux/random.h b/include/linux/random.h
92939index b05856e..0a9f14e 100644
92940--- a/include/linux/random.h
92941+++ b/include/linux/random.h
92942@@ -9,9 +9,19 @@
92943 #include <uapi/linux/random.h>
92944
92945 extern void add_device_randomness(const void *, unsigned int);
92946+
92947+static inline void add_latent_entropy(void)
92948+{
92949+
92950+#ifdef LATENT_ENTROPY_PLUGIN
92951+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
92952+#endif
92953+
92954+}
92955+
92956 extern void add_input_randomness(unsigned int type, unsigned int code,
92957- unsigned int value);
92958-extern void add_interrupt_randomness(int irq, int irq_flags);
92959+ unsigned int value) __latent_entropy;
92960+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
92961
92962 extern void get_random_bytes(void *buf, int nbytes);
92963 extern void get_random_bytes_arch(void *buf, int nbytes);
92964@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
92965 extern const struct file_operations random_fops, urandom_fops;
92966 #endif
92967
92968-unsigned int get_random_int(void);
92969+unsigned int __intentional_overflow(-1) get_random_int(void);
92970 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
92971
92972-u32 prandom_u32(void);
92973+u32 prandom_u32(void) __intentional_overflow(-1);
92974 void prandom_bytes(void *buf, size_t nbytes);
92975 void prandom_seed(u32 seed);
92976 void prandom_reseed_late(void);
92977@@ -37,6 +47,11 @@ struct rnd_state {
92978 u32 prandom_u32_state(struct rnd_state *state);
92979 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
92980
92981+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
92982+{
92983+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
92984+}
92985+
92986 /**
92987 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
92988 * @ep_ro: right open interval endpoint
92989@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
92990 *
92991 * Returns: pseudo-random number in interval [0, ep_ro)
92992 */
92993-static inline u32 prandom_u32_max(u32 ep_ro)
92994+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
92995 {
92996 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
92997 }
92998diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
92999index 378c5ee..aa84a47 100644
93000--- a/include/linux/rbtree_augmented.h
93001+++ b/include/linux/rbtree_augmented.h
93002@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
93003 old->rbaugmented = rbcompute(old); \
93004 } \
93005 rbstatic const struct rb_augment_callbacks rbname = { \
93006- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
93007+ .propagate = rbname ## _propagate, \
93008+ .copy = rbname ## _copy, \
93009+ .rotate = rbname ## _rotate \
93010 };
93011
93012
93013diff --git a/include/linux/rculist.h b/include/linux/rculist.h
93014index a18b16f..2683096 100644
93015--- a/include/linux/rculist.h
93016+++ b/include/linux/rculist.h
93017@@ -29,8 +29,8 @@
93018 */
93019 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
93020 {
93021- ACCESS_ONCE(list->next) = list;
93022- ACCESS_ONCE(list->prev) = list;
93023+ ACCESS_ONCE_RW(list->next) = list;
93024+ ACCESS_ONCE_RW(list->prev) = list;
93025 }
93026
93027 /*
93028@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
93029 struct list_head *prev, struct list_head *next);
93030 #endif
93031
93032+void __pax_list_add_rcu(struct list_head *new,
93033+ struct list_head *prev, struct list_head *next);
93034+
93035 /**
93036 * list_add_rcu - add a new entry to rcu-protected list
93037 * @new: new entry to be added
93038@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
93039 __list_add_rcu(new, head, head->next);
93040 }
93041
93042+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
93043+{
93044+ __pax_list_add_rcu(new, head, head->next);
93045+}
93046+
93047 /**
93048 * list_add_tail_rcu - add a new entry to rcu-protected list
93049 * @new: new entry to be added
93050@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
93051 __list_add_rcu(new, head->prev, head);
93052 }
93053
93054+static inline void pax_list_add_tail_rcu(struct list_head *new,
93055+ struct list_head *head)
93056+{
93057+ __pax_list_add_rcu(new, head->prev, head);
93058+}
93059+
93060 /**
93061 * list_del_rcu - deletes entry from list without re-initialization
93062 * @entry: the element to delete from the list.
93063@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
93064 entry->prev = LIST_POISON2;
93065 }
93066
93067+extern void pax_list_del_rcu(struct list_head *entry);
93068+
93069 /**
93070 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
93071 * @n: the element to delete from the hash list.
93072diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
93073index 7809749..1cd9315 100644
93074--- a/include/linux/rcupdate.h
93075+++ b/include/linux/rcupdate.h
93076@@ -333,7 +333,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
93077 do { \
93078 rcu_all_qs(); \
93079 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
93080- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
93081+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
93082 } while (0)
93083 #else /* #ifdef CONFIG_TASKS_RCU */
93084 #define TASKS_RCU(x) do { } while (0)
93085diff --git a/include/linux/reboot.h b/include/linux/reboot.h
93086index 67fc8fc..a90f7d8 100644
93087--- a/include/linux/reboot.h
93088+++ b/include/linux/reboot.h
93089@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
93090 */
93091
93092 extern void migrate_to_reboot_cpu(void);
93093-extern void machine_restart(char *cmd);
93094-extern void machine_halt(void);
93095-extern void machine_power_off(void);
93096+extern void machine_restart(char *cmd) __noreturn;
93097+extern void machine_halt(void) __noreturn;
93098+extern void machine_power_off(void) __noreturn;
93099
93100 extern void machine_shutdown(void);
93101 struct pt_regs;
93102@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
93103 */
93104
93105 extern void kernel_restart_prepare(char *cmd);
93106-extern void kernel_restart(char *cmd);
93107-extern void kernel_halt(void);
93108-extern void kernel_power_off(void);
93109+extern void kernel_restart(char *cmd) __noreturn;
93110+extern void kernel_halt(void) __noreturn;
93111+extern void kernel_power_off(void) __noreturn;
93112
93113 extern int C_A_D; /* for sysctl */
93114 void ctrl_alt_del(void);
93115@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
93116 * Emergency restart, callable from an interrupt handler.
93117 */
93118
93119-extern void emergency_restart(void);
93120+extern void emergency_restart(void) __noreturn;
93121 #include <asm/emergency-restart.h>
93122
93123 #endif /* _LINUX_REBOOT_H */
93124diff --git a/include/linux/regset.h b/include/linux/regset.h
93125index 8e0c9fe..ac4d221 100644
93126--- a/include/linux/regset.h
93127+++ b/include/linux/regset.h
93128@@ -161,7 +161,8 @@ struct user_regset {
93129 unsigned int align;
93130 unsigned int bias;
93131 unsigned int core_note_type;
93132-};
93133+} __do_const;
93134+typedef struct user_regset __no_const user_regset_no_const;
93135
93136 /**
93137 * struct user_regset_view - available regsets
93138diff --git a/include/linux/relay.h b/include/linux/relay.h
93139index d7c8359..818daf5 100644
93140--- a/include/linux/relay.h
93141+++ b/include/linux/relay.h
93142@@ -157,7 +157,7 @@ struct rchan_callbacks
93143 * The callback should return 0 if successful, negative if not.
93144 */
93145 int (*remove_buf_file)(struct dentry *dentry);
93146-};
93147+} __no_const;
93148
93149 /*
93150 * CONFIG_RELAY kernel API, kernel/relay.c
93151diff --git a/include/linux/rio.h b/include/linux/rio.h
93152index 6bda06f..bf39a9b 100644
93153--- a/include/linux/rio.h
93154+++ b/include/linux/rio.h
93155@@ -358,7 +358,7 @@ struct rio_ops {
93156 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
93157 u64 rstart, u32 size, u32 flags);
93158 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
93159-};
93160+} __no_const;
93161
93162 #define RIO_RESOURCE_MEM 0x00000100
93163 #define RIO_RESOURCE_DOORBELL 0x00000200
93164diff --git a/include/linux/rmap.h b/include/linux/rmap.h
93165index c4c559a..6ba9a26 100644
93166--- a/include/linux/rmap.h
93167+++ b/include/linux/rmap.h
93168@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
93169 void anon_vma_init(void); /* create anon_vma_cachep */
93170 int anon_vma_prepare(struct vm_area_struct *);
93171 void unlink_anon_vmas(struct vm_area_struct *);
93172-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
93173-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
93174+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
93175+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
93176
93177 static inline void anon_vma_merge(struct vm_area_struct *vma,
93178 struct vm_area_struct *next)
93179diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
93180index ed8f9e70..999bc96 100644
93181--- a/include/linux/scatterlist.h
93182+++ b/include/linux/scatterlist.h
93183@@ -1,6 +1,7 @@
93184 #ifndef _LINUX_SCATTERLIST_H
93185 #define _LINUX_SCATTERLIST_H
93186
93187+#include <linux/sched.h>
93188 #include <linux/string.h>
93189 #include <linux/bug.h>
93190 #include <linux/mm.h>
93191@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
93192 #ifdef CONFIG_DEBUG_SG
93193 BUG_ON(!virt_addr_valid(buf));
93194 #endif
93195+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
93196+ if (object_starts_on_stack(buf)) {
93197+ void *adjbuf = buf - current->stack + current->lowmem_stack;
93198+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
93199+ } else
93200+#endif
93201 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
93202 }
93203
93204diff --git a/include/linux/sched.h b/include/linux/sched.h
93205index 51348f7..8c8b0ba 100644
93206--- a/include/linux/sched.h
93207+++ b/include/linux/sched.h
93208@@ -133,6 +133,7 @@ struct fs_struct;
93209 struct perf_event_context;
93210 struct blk_plug;
93211 struct filename;
93212+struct linux_binprm;
93213
93214 #define VMACACHE_BITS 2
93215 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
93216@@ -420,7 +421,7 @@ extern char __sched_text_start[], __sched_text_end[];
93217 extern int in_sched_functions(unsigned long addr);
93218
93219 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
93220-extern signed long schedule_timeout(signed long timeout);
93221+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
93222 extern signed long schedule_timeout_interruptible(signed long timeout);
93223 extern signed long schedule_timeout_killable(signed long timeout);
93224 extern signed long schedule_timeout_uninterruptible(signed long timeout);
93225@@ -438,6 +439,19 @@ struct nsproxy;
93226 struct user_namespace;
93227
93228 #ifdef CONFIG_MMU
93229+
93230+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
93231+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
93232+#else
93233+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
93234+{
93235+ return 0;
93236+}
93237+#endif
93238+
93239+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
93240+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
93241+
93242 extern void arch_pick_mmap_layout(struct mm_struct *mm);
93243 extern unsigned long
93244 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
93245@@ -736,6 +750,17 @@ struct signal_struct {
93246 #ifdef CONFIG_TASKSTATS
93247 struct taskstats *stats;
93248 #endif
93249+
93250+#ifdef CONFIG_GRKERNSEC
93251+ u32 curr_ip;
93252+ u32 saved_ip;
93253+ u32 gr_saddr;
93254+ u32 gr_daddr;
93255+ u16 gr_sport;
93256+ u16 gr_dport;
93257+ u8 used_accept:1;
93258+#endif
93259+
93260 #ifdef CONFIG_AUDIT
93261 unsigned audit_tty;
93262 unsigned audit_tty_log_passwd;
93263@@ -762,7 +787,7 @@ struct signal_struct {
93264 struct mutex cred_guard_mutex; /* guard against foreign influences on
93265 * credential calculations
93266 * (notably. ptrace) */
93267-};
93268+} __randomize_layout;
93269
93270 /*
93271 * Bits in flags field of signal_struct.
93272@@ -815,6 +840,14 @@ struct user_struct {
93273 struct key *session_keyring; /* UID's default session keyring */
93274 #endif
93275
93276+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
93277+ unsigned char kernel_banned;
93278+#endif
93279+#ifdef CONFIG_GRKERNSEC_BRUTE
93280+ unsigned char suid_banned;
93281+ unsigned long suid_ban_expires;
93282+#endif
93283+
93284 /* Hash table maintenance information */
93285 struct hlist_node uidhash_node;
93286 kuid_t uid;
93287@@ -822,7 +855,7 @@ struct user_struct {
93288 #ifdef CONFIG_PERF_EVENTS
93289 atomic_long_t locked_vm;
93290 #endif
93291-};
93292+} __randomize_layout;
93293
93294 extern int uids_sysfs_init(void);
93295
93296@@ -1286,6 +1319,9 @@ enum perf_event_task_context {
93297 struct task_struct {
93298 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
93299 void *stack;
93300+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
93301+ void *lowmem_stack;
93302+#endif
93303 atomic_t usage;
93304 unsigned int flags; /* per process flags, defined below */
93305 unsigned int ptrace;
93306@@ -1419,8 +1455,8 @@ struct task_struct {
93307 struct list_head thread_node;
93308
93309 struct completion *vfork_done; /* for vfork() */
93310- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
93311- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
93312+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
93313+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
93314
93315 cputime_t utime, stime, utimescaled, stimescaled;
93316 cputime_t gtime;
93317@@ -1445,11 +1481,6 @@ struct task_struct {
93318 struct task_cputime cputime_expires;
93319 struct list_head cpu_timers[3];
93320
93321-/* process credentials */
93322- const struct cred __rcu *real_cred; /* objective and real subjective task
93323- * credentials (COW) */
93324- const struct cred __rcu *cred; /* effective (overridable) subjective task
93325- * credentials (COW) */
93326 char comm[TASK_COMM_LEN]; /* executable name excluding path
93327 - access with [gs]et_task_comm (which lock
93328 it with task_lock())
93329@@ -1467,6 +1498,10 @@ struct task_struct {
93330 #endif
93331 /* CPU-specific state of this task */
93332 struct thread_struct thread;
93333+/* thread_info moved to task_struct */
93334+#ifdef CONFIG_X86
93335+ struct thread_info tinfo;
93336+#endif
93337 /* filesystem information */
93338 struct fs_struct *fs;
93339 /* open file information */
93340@@ -1541,6 +1576,10 @@ struct task_struct {
93341 gfp_t lockdep_reclaim_gfp;
93342 #endif
93343
93344+/* process credentials */
93345+ const struct cred __rcu *real_cred; /* objective and real subjective task
93346+ * credentials (COW) */
93347+
93348 /* journalling filesystem info */
93349 void *journal_info;
93350
93351@@ -1579,6 +1618,10 @@ struct task_struct {
93352 /* cg_list protected by css_set_lock and tsk->alloc_lock */
93353 struct list_head cg_list;
93354 #endif
93355+
93356+ const struct cred __rcu *cred; /* effective (overridable) subjective task
93357+ * credentials (COW) */
93358+
93359 #ifdef CONFIG_FUTEX
93360 struct robust_list_head __user *robust_list;
93361 #ifdef CONFIG_COMPAT
93362@@ -1690,7 +1733,7 @@ struct task_struct {
93363 * Number of functions that haven't been traced
93364 * because of depth overrun.
93365 */
93366- atomic_t trace_overrun;
93367+ atomic_unchecked_t trace_overrun;
93368 /* Pause for the tracing */
93369 atomic_t tracing_graph_pause;
93370 #endif
93371@@ -1718,7 +1761,78 @@ struct task_struct {
93372 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
93373 unsigned long task_state_change;
93374 #endif
93375-};
93376+
93377+#ifdef CONFIG_GRKERNSEC
93378+ /* grsecurity */
93379+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
93380+ u64 exec_id;
93381+#endif
93382+#ifdef CONFIG_GRKERNSEC_SETXID
93383+ const struct cred *delayed_cred;
93384+#endif
93385+ struct dentry *gr_chroot_dentry;
93386+ struct acl_subject_label *acl;
93387+ struct acl_subject_label *tmpacl;
93388+ struct acl_role_label *role;
93389+ struct file *exec_file;
93390+ unsigned long brute_expires;
93391+ u16 acl_role_id;
93392+ u8 inherited;
93393+ /* is this the task that authenticated to the special role */
93394+ u8 acl_sp_role;
93395+ u8 is_writable;
93396+ u8 brute;
93397+ u8 gr_is_chrooted;
93398+#endif
93399+
93400+} __randomize_layout;
93401+
93402+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
93403+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
93404+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
93405+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
93406+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
93407+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
93408+
93409+#ifdef CONFIG_PAX_SOFTMODE
93410+extern int pax_softmode;
93411+#endif
93412+
93413+extern int pax_check_flags(unsigned long *);
93414+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
93415+
93416+/* if tsk != current then task_lock must be held on it */
93417+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
93418+static inline unsigned long pax_get_flags(struct task_struct *tsk)
93419+{
93420+ if (likely(tsk->mm))
93421+ return tsk->mm->pax_flags;
93422+ else
93423+ return 0UL;
93424+}
93425+
93426+/* if tsk != current then task_lock must be held on it */
93427+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
93428+{
93429+ if (likely(tsk->mm)) {
93430+ tsk->mm->pax_flags = flags;
93431+ return 0;
93432+ }
93433+ return -EINVAL;
93434+}
93435+#endif
93436+
93437+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
93438+extern void pax_set_initial_flags(struct linux_binprm *bprm);
93439+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
93440+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
93441+#endif
93442+
93443+struct path;
93444+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
93445+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
93446+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
93447+extern void pax_report_refcount_overflow(struct pt_regs *regs);
93448
93449 /* Future-safe accessor for struct task_struct's cpus_allowed. */
93450 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
93451@@ -1801,7 +1915,7 @@ struct pid_namespace;
93452 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
93453 struct pid_namespace *ns);
93454
93455-static inline pid_t task_pid_nr(struct task_struct *tsk)
93456+static inline pid_t task_pid_nr(const struct task_struct *tsk)
93457 {
93458 return tsk->pid;
93459 }
93460@@ -2169,6 +2283,25 @@ extern u64 sched_clock_cpu(int cpu);
93461
93462 extern void sched_clock_init(void);
93463
93464+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
93465+static inline void populate_stack(void)
93466+{
93467+ struct task_struct *curtask = current;
93468+ int c;
93469+ int *ptr = curtask->stack;
93470+ int *end = curtask->stack + THREAD_SIZE;
93471+
93472+ while (ptr < end) {
93473+ c = *(volatile int *)ptr;
93474+ ptr += PAGE_SIZE/sizeof(int);
93475+ }
93476+}
93477+#else
93478+static inline void populate_stack(void)
93479+{
93480+}
93481+#endif
93482+
93483 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
93484 static inline void sched_clock_tick(void)
93485 {
93486@@ -2302,7 +2435,9 @@ void yield(void);
93487 extern struct exec_domain default_exec_domain;
93488
93489 union thread_union {
93490+#ifndef CONFIG_X86
93491 struct thread_info thread_info;
93492+#endif
93493 unsigned long stack[THREAD_SIZE/sizeof(long)];
93494 };
93495
93496@@ -2335,6 +2470,7 @@ extern struct pid_namespace init_pid_ns;
93497 */
93498
93499 extern struct task_struct *find_task_by_vpid(pid_t nr);
93500+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
93501 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
93502 struct pid_namespace *ns);
93503
93504@@ -2499,7 +2635,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
93505 extern void exit_itimers(struct signal_struct *);
93506 extern void flush_itimer_signals(void);
93507
93508-extern void do_group_exit(int);
93509+extern __noreturn void do_group_exit(int);
93510
93511 extern int do_execve(struct filename *,
93512 const char __user * const __user *,
93513@@ -2720,9 +2856,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
93514 #define task_stack_end_corrupted(task) \
93515 (*(end_of_stack(task)) != STACK_END_MAGIC)
93516
93517-static inline int object_is_on_stack(void *obj)
93518+static inline int object_starts_on_stack(const void *obj)
93519 {
93520- void *stack = task_stack_page(current);
93521+ const void *stack = task_stack_page(current);
93522
93523 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
93524 }
93525diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
93526index 596a0e0..bea77ec 100644
93527--- a/include/linux/sched/sysctl.h
93528+++ b/include/linux/sched/sysctl.h
93529@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
93530 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
93531
93532 extern int sysctl_max_map_count;
93533+extern unsigned long sysctl_heap_stack_gap;
93534
93535 extern unsigned int sysctl_sched_latency;
93536 extern unsigned int sysctl_sched_min_granularity;
93537diff --git a/include/linux/security.h b/include/linux/security.h
93538index a1b7dbd..036f47f 100644
93539--- a/include/linux/security.h
93540+++ b/include/linux/security.h
93541@@ -27,6 +27,7 @@
93542 #include <linux/slab.h>
93543 #include <linux/err.h>
93544 #include <linux/string.h>
93545+#include <linux/grsecurity.h>
93546
93547 struct linux_binprm;
93548 struct cred;
93549@@ -116,8 +117,6 @@ struct seq_file;
93550
93551 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
93552
93553-void reset_security_ops(void);
93554-
93555 #ifdef CONFIG_MMU
93556 extern unsigned long mmap_min_addr;
93557 extern unsigned long dac_mmap_min_addr;
93558@@ -1756,7 +1755,7 @@ struct security_operations {
93559 struct audit_context *actx);
93560 void (*audit_rule_free) (void *lsmrule);
93561 #endif /* CONFIG_AUDIT */
93562-};
93563+} __randomize_layout;
93564
93565 /* prototypes */
93566 extern int security_init(void);
93567diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
93568index dc368b8..e895209 100644
93569--- a/include/linux/semaphore.h
93570+++ b/include/linux/semaphore.h
93571@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
93572 }
93573
93574 extern void down(struct semaphore *sem);
93575-extern int __must_check down_interruptible(struct semaphore *sem);
93576+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
93577 extern int __must_check down_killable(struct semaphore *sem);
93578 extern int __must_check down_trylock(struct semaphore *sem);
93579 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
93580diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
93581index afbb1fd..e1d205d 100644
93582--- a/include/linux/seq_file.h
93583+++ b/include/linux/seq_file.h
93584@@ -27,6 +27,9 @@ struct seq_file {
93585 struct mutex lock;
93586 const struct seq_operations *op;
93587 int poll_event;
93588+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
93589+ u64 exec_id;
93590+#endif
93591 #ifdef CONFIG_USER_NS
93592 struct user_namespace *user_ns;
93593 #endif
93594@@ -39,6 +42,7 @@ struct seq_operations {
93595 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
93596 int (*show) (struct seq_file *m, void *v);
93597 };
93598+typedef struct seq_operations __no_const seq_operations_no_const;
93599
93600 #define SEQ_SKIP 1
93601
93602@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
93603
93604 char *mangle_path(char *s, const char *p, const char *esc);
93605 int seq_open(struct file *, const struct seq_operations *);
93606+int seq_open_restrict(struct file *, const struct seq_operations *);
93607 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
93608 loff_t seq_lseek(struct file *, loff_t, int);
93609 int seq_release(struct inode *, struct file *);
93610@@ -128,6 +133,7 @@ int seq_path_root(struct seq_file *m, const struct path *path,
93611 const struct path *root, const char *esc);
93612
93613 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
93614+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
93615 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
93616 int single_release(struct inode *, struct file *);
93617 void *__seq_open_private(struct file *, const struct seq_operations *, int);
93618diff --git a/include/linux/shm.h b/include/linux/shm.h
93619index 6fb8016..ab4465e 100644
93620--- a/include/linux/shm.h
93621+++ b/include/linux/shm.h
93622@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
93623 /* The task created the shm object. NULL if the task is dead. */
93624 struct task_struct *shm_creator;
93625 struct list_head shm_clist; /* list by creator */
93626+#ifdef CONFIG_GRKERNSEC
93627+ u64 shm_createtime;
93628+ pid_t shm_lapid;
93629+#endif
93630 };
93631
93632 /* shm_mode upper byte flags */
93633diff --git a/include/linux/signal.h b/include/linux/signal.h
93634index ab1e039..ad4229e 100644
93635--- a/include/linux/signal.h
93636+++ b/include/linux/signal.h
93637@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
93638 * know it'll be handled, so that they don't get converted to
93639 * SIGKILL or just silently dropped.
93640 */
93641- kernel_sigaction(sig, (__force __sighandler_t)2);
93642+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
93643 }
93644
93645 static inline void disallow_signal(int sig)
93646diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
93647index bdccc4b..e9f8670 100644
93648--- a/include/linux/skbuff.h
93649+++ b/include/linux/skbuff.h
93650@@ -771,7 +771,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
93651 int node);
93652 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
93653 struct sk_buff *build_skb(void *data, unsigned int frag_size);
93654-static inline struct sk_buff *alloc_skb(unsigned int size,
93655+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
93656 gfp_t priority)
93657 {
93658 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
93659@@ -1967,7 +1967,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
93660 return skb->inner_transport_header - skb->inner_network_header;
93661 }
93662
93663-static inline int skb_network_offset(const struct sk_buff *skb)
93664+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
93665 {
93666 return skb_network_header(skb) - skb->data;
93667 }
93668@@ -2027,7 +2027,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
93669 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
93670 */
93671 #ifndef NET_SKB_PAD
93672-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
93673+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
93674 #endif
93675
93676 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
93677@@ -2669,9 +2669,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
93678 int *err);
93679 unsigned int datagram_poll(struct file *file, struct socket *sock,
93680 struct poll_table_struct *wait);
93681-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
93682+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
93683 struct iov_iter *to, int size);
93684-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
93685+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
93686 struct msghdr *msg, int size)
93687 {
93688 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
93689@@ -3193,6 +3193,9 @@ static inline void nf_reset(struct sk_buff *skb)
93690 nf_bridge_put(skb->nf_bridge);
93691 skb->nf_bridge = NULL;
93692 #endif
93693+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
93694+ skb->nf_trace = 0;
93695+#endif
93696 }
93697
93698 static inline void nf_reset_trace(struct sk_buff *skb)
93699diff --git a/include/linux/slab.h b/include/linux/slab.h
93700index 76f1fee..d95e6d2 100644
93701--- a/include/linux/slab.h
93702+++ b/include/linux/slab.h
93703@@ -14,15 +14,29 @@
93704 #include <linux/gfp.h>
93705 #include <linux/types.h>
93706 #include <linux/workqueue.h>
93707-
93708+#include <linux/err.h>
93709
93710 /*
93711 * Flags to pass to kmem_cache_create().
93712 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
93713 */
93714 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
93715+
93716+#ifdef CONFIG_PAX_USERCOPY_SLABS
93717+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
93718+#else
93719+#define SLAB_USERCOPY 0x00000000UL
93720+#endif
93721+
93722 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
93723 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
93724+
93725+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93726+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
93727+#else
93728+#define SLAB_NO_SANITIZE 0x00000000UL
93729+#endif
93730+
93731 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
93732 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
93733 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
93734@@ -98,10 +112,13 @@
93735 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
93736 * Both make kfree a no-op.
93737 */
93738-#define ZERO_SIZE_PTR ((void *)16)
93739+#define ZERO_SIZE_PTR \
93740+({ \
93741+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
93742+ (void *)(-MAX_ERRNO-1L); \
93743+})
93744
93745-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
93746- (unsigned long)ZERO_SIZE_PTR)
93747+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
93748
93749 #include <linux/kmemleak.h>
93750 #include <linux/kasan.h>
93751@@ -143,6 +160,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
93752 void kfree(const void *);
93753 void kzfree(const void *);
93754 size_t ksize(const void *);
93755+const char *check_heap_object(const void *ptr, unsigned long n);
93756+bool is_usercopy_object(const void *ptr);
93757
93758 /*
93759 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
93760@@ -235,6 +254,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
93761 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
93762 #endif
93763
93764+#ifdef CONFIG_PAX_USERCOPY_SLABS
93765+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
93766+#endif
93767+
93768 /*
93769 * Figure out which kmalloc slab an allocation of a certain size
93770 * belongs to.
93771@@ -243,7 +266,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
93772 * 2 = 120 .. 192 bytes
93773 * n = 2^(n-1) .. 2^n -1
93774 */
93775-static __always_inline int kmalloc_index(size_t size)
93776+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
93777 {
93778 if (!size)
93779 return 0;
93780@@ -286,15 +309,15 @@ static __always_inline int kmalloc_index(size_t size)
93781 }
93782 #endif /* !CONFIG_SLOB */
93783
93784-void *__kmalloc(size_t size, gfp_t flags);
93785+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
93786 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
93787 void kmem_cache_free(struct kmem_cache *, void *);
93788
93789 #ifdef CONFIG_NUMA
93790-void *__kmalloc_node(size_t size, gfp_t flags, int node);
93791+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
93792 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
93793 #else
93794-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
93795+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
93796 {
93797 return __kmalloc(size, flags);
93798 }
93799diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
93800index 33d0490..70a6313 100644
93801--- a/include/linux/slab_def.h
93802+++ b/include/linux/slab_def.h
93803@@ -40,7 +40,7 @@ struct kmem_cache {
93804 /* 4) cache creation/removal */
93805 const char *name;
93806 struct list_head list;
93807- int refcount;
93808+ atomic_t refcount;
93809 int object_size;
93810 int align;
93811
93812@@ -56,10 +56,14 @@ struct kmem_cache {
93813 unsigned long node_allocs;
93814 unsigned long node_frees;
93815 unsigned long node_overflow;
93816- atomic_t allochit;
93817- atomic_t allocmiss;
93818- atomic_t freehit;
93819- atomic_t freemiss;
93820+ atomic_unchecked_t allochit;
93821+ atomic_unchecked_t allocmiss;
93822+ atomic_unchecked_t freehit;
93823+ atomic_unchecked_t freemiss;
93824+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93825+ atomic_unchecked_t sanitized;
93826+ atomic_unchecked_t not_sanitized;
93827+#endif
93828
93829 /*
93830 * If debugging is enabled, then the allocator can add additional
93831diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
93832index 3388511..6252f90 100644
93833--- a/include/linux/slub_def.h
93834+++ b/include/linux/slub_def.h
93835@@ -74,7 +74,7 @@ struct kmem_cache {
93836 struct kmem_cache_order_objects max;
93837 struct kmem_cache_order_objects min;
93838 gfp_t allocflags; /* gfp flags to use on each alloc */
93839- int refcount; /* Refcount for slab cache destroy */
93840+ atomic_t refcount; /* Refcount for slab cache destroy */
93841 void (*ctor)(void *);
93842 int inuse; /* Offset to metadata */
93843 int align; /* Alignment */
93844diff --git a/include/linux/smp.h b/include/linux/smp.h
93845index be91db2..3f23232 100644
93846--- a/include/linux/smp.h
93847+++ b/include/linux/smp.h
93848@@ -183,7 +183,9 @@ static inline void smp_init(void) { }
93849 #endif
93850
93851 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
93852+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
93853 #define put_cpu() preempt_enable()
93854+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
93855
93856 /*
93857 * Callback to arch code if there's nosmp or maxcpus=0 on the
93858diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
93859index 46cca4c..3323536 100644
93860--- a/include/linux/sock_diag.h
93861+++ b/include/linux/sock_diag.h
93862@@ -11,7 +11,7 @@ struct sock;
93863 struct sock_diag_handler {
93864 __u8 family;
93865 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
93866-};
93867+} __do_const;
93868
93869 int sock_diag_register(const struct sock_diag_handler *h);
93870 void sock_diag_unregister(const struct sock_diag_handler *h);
93871diff --git a/include/linux/sonet.h b/include/linux/sonet.h
93872index 680f9a3..f13aeb0 100644
93873--- a/include/linux/sonet.h
93874+++ b/include/linux/sonet.h
93875@@ -7,7 +7,7 @@
93876 #include <uapi/linux/sonet.h>
93877
93878 struct k_sonet_stats {
93879-#define __HANDLE_ITEM(i) atomic_t i
93880+#define __HANDLE_ITEM(i) atomic_unchecked_t i
93881 __SONET_ITEMS
93882 #undef __HANDLE_ITEM
93883 };
93884diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
93885index 07d8e53..dc934c9 100644
93886--- a/include/linux/sunrpc/addr.h
93887+++ b/include/linux/sunrpc/addr.h
93888@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
93889 {
93890 switch (sap->sa_family) {
93891 case AF_INET:
93892- return ntohs(((struct sockaddr_in *)sap)->sin_port);
93893+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
93894 case AF_INET6:
93895- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
93896+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
93897 }
93898 return 0;
93899 }
93900@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
93901 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
93902 const struct sockaddr *src)
93903 {
93904- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
93905+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
93906 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
93907
93908 dsin->sin_family = ssin->sin_family;
93909@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
93910 if (sa->sa_family != AF_INET6)
93911 return 0;
93912
93913- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
93914+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
93915 }
93916
93917 #endif /* _LINUX_SUNRPC_ADDR_H */
93918diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
93919index 598ba80..d90cba6 100644
93920--- a/include/linux/sunrpc/clnt.h
93921+++ b/include/linux/sunrpc/clnt.h
93922@@ -100,7 +100,7 @@ struct rpc_procinfo {
93923 unsigned int p_timer; /* Which RTT timer to use */
93924 u32 p_statidx; /* Which procedure to account */
93925 const char * p_name; /* name of procedure */
93926-};
93927+} __do_const;
93928
93929 #ifdef __KERNEL__
93930
93931diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
93932index fae6fb9..023fbcd 100644
93933--- a/include/linux/sunrpc/svc.h
93934+++ b/include/linux/sunrpc/svc.h
93935@@ -420,7 +420,7 @@ struct svc_procedure {
93936 unsigned int pc_count; /* call count */
93937 unsigned int pc_cachetype; /* cache info (NFS) */
93938 unsigned int pc_xdrressize; /* maximum size of XDR reply */
93939-};
93940+} __do_const;
93941
93942 /*
93943 * Function prototypes.
93944diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
93945index df8edf8..d140fec 100644
93946--- a/include/linux/sunrpc/svc_rdma.h
93947+++ b/include/linux/sunrpc/svc_rdma.h
93948@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
93949 extern unsigned int svcrdma_max_requests;
93950 extern unsigned int svcrdma_max_req_size;
93951
93952-extern atomic_t rdma_stat_recv;
93953-extern atomic_t rdma_stat_read;
93954-extern atomic_t rdma_stat_write;
93955-extern atomic_t rdma_stat_sq_starve;
93956-extern atomic_t rdma_stat_rq_starve;
93957-extern atomic_t rdma_stat_rq_poll;
93958-extern atomic_t rdma_stat_rq_prod;
93959-extern atomic_t rdma_stat_sq_poll;
93960-extern atomic_t rdma_stat_sq_prod;
93961+extern atomic_unchecked_t rdma_stat_recv;
93962+extern atomic_unchecked_t rdma_stat_read;
93963+extern atomic_unchecked_t rdma_stat_write;
93964+extern atomic_unchecked_t rdma_stat_sq_starve;
93965+extern atomic_unchecked_t rdma_stat_rq_starve;
93966+extern atomic_unchecked_t rdma_stat_rq_poll;
93967+extern atomic_unchecked_t rdma_stat_rq_prod;
93968+extern atomic_unchecked_t rdma_stat_sq_poll;
93969+extern atomic_unchecked_t rdma_stat_sq_prod;
93970
93971 /*
93972 * Contexts are built when an RDMA request is created and are a
93973diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
93974index 8d71d65..f79586e 100644
93975--- a/include/linux/sunrpc/svcauth.h
93976+++ b/include/linux/sunrpc/svcauth.h
93977@@ -120,7 +120,7 @@ struct auth_ops {
93978 int (*release)(struct svc_rqst *rq);
93979 void (*domain_release)(struct auth_domain *);
93980 int (*set_client)(struct svc_rqst *rq);
93981-};
93982+} __do_const;
93983
93984 #define SVC_GARBAGE 1
93985 #define SVC_SYSERR 2
93986diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
93987index e7a018e..49f8b17 100644
93988--- a/include/linux/swiotlb.h
93989+++ b/include/linux/swiotlb.h
93990@@ -60,7 +60,8 @@ extern void
93991
93992 extern void
93993 swiotlb_free_coherent(struct device *hwdev, size_t size,
93994- void *vaddr, dma_addr_t dma_handle);
93995+ void *vaddr, dma_addr_t dma_handle,
93996+ struct dma_attrs *attrs);
93997
93998 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
93999 unsigned long offset, size_t size,
94000diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
94001index 76d1e38..200776e 100644
94002--- a/include/linux/syscalls.h
94003+++ b/include/linux/syscalls.h
94004@@ -102,7 +102,12 @@ union bpf_attr;
94005 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
94006 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
94007 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
94008-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
94009+#define __SC_LONG(t, a) __typeof__( \
94010+ __builtin_choose_expr( \
94011+ sizeof(t) > sizeof(int), \
94012+ (t) 0, \
94013+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
94014+ )) a
94015 #define __SC_CAST(t, a) (t) a
94016 #define __SC_ARGS(t, a) a
94017 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
94018@@ -384,11 +389,11 @@ asmlinkage long sys_sync(void);
94019 asmlinkage long sys_fsync(unsigned int fd);
94020 asmlinkage long sys_fdatasync(unsigned int fd);
94021 asmlinkage long sys_bdflush(int func, long data);
94022-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
94023- char __user *type, unsigned long flags,
94024+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
94025+ const char __user *type, unsigned long flags,
94026 void __user *data);
94027-asmlinkage long sys_umount(char __user *name, int flags);
94028-asmlinkage long sys_oldumount(char __user *name);
94029+asmlinkage long sys_umount(const char __user *name, int flags);
94030+asmlinkage long sys_oldumount(const char __user *name);
94031 asmlinkage long sys_truncate(const char __user *path, long length);
94032 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
94033 asmlinkage long sys_stat(const char __user *filename,
94034@@ -604,7 +609,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
94035 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
94036 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
94037 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
94038- struct sockaddr __user *, int);
94039+ struct sockaddr __user *, int) __intentional_overflow(0);
94040 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
94041 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
94042 unsigned int vlen, unsigned flags);
94043@@ -663,10 +668,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
94044
94045 asmlinkage long sys_semget(key_t key, int nsems, int semflg);
94046 asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
94047- unsigned nsops);
94048+ long nsops);
94049 asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
94050 asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
94051- unsigned nsops,
94052+ long nsops,
94053 const struct timespec __user *timeout);
94054 asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
94055 asmlinkage long sys_shmget(key_t key, size_t size, int flag);
94056diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
94057index 27b3b0b..e093dd9 100644
94058--- a/include/linux/syscore_ops.h
94059+++ b/include/linux/syscore_ops.h
94060@@ -16,7 +16,7 @@ struct syscore_ops {
94061 int (*suspend)(void);
94062 void (*resume)(void);
94063 void (*shutdown)(void);
94064-};
94065+} __do_const;
94066
94067 extern void register_syscore_ops(struct syscore_ops *ops);
94068 extern void unregister_syscore_ops(struct syscore_ops *ops);
94069diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
94070index b7361f8..341a15a 100644
94071--- a/include/linux/sysctl.h
94072+++ b/include/linux/sysctl.h
94073@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
94074
94075 extern int proc_dostring(struct ctl_table *, int,
94076 void __user *, size_t *, loff_t *);
94077+extern int proc_dostring_modpriv(struct ctl_table *, int,
94078+ void __user *, size_t *, loff_t *);
94079 extern int proc_dointvec(struct ctl_table *, int,
94080 void __user *, size_t *, loff_t *);
94081 extern int proc_dointvec_minmax(struct ctl_table *, int,
94082@@ -113,7 +115,8 @@ struct ctl_table
94083 struct ctl_table_poll *poll;
94084 void *extra1;
94085 void *extra2;
94086-};
94087+} __do_const __randomize_layout;
94088+typedef struct ctl_table __no_const ctl_table_no_const;
94089
94090 struct ctl_node {
94091 struct rb_node node;
94092diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
94093index ddad161..a3efd26 100644
94094--- a/include/linux/sysfs.h
94095+++ b/include/linux/sysfs.h
94096@@ -34,7 +34,8 @@ struct attribute {
94097 struct lock_class_key *key;
94098 struct lock_class_key skey;
94099 #endif
94100-};
94101+} __do_const;
94102+typedef struct attribute __no_const attribute_no_const;
94103
94104 /**
94105 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
94106@@ -63,7 +64,8 @@ struct attribute_group {
94107 struct attribute *, int);
94108 struct attribute **attrs;
94109 struct bin_attribute **bin_attrs;
94110-};
94111+} __do_const;
94112+typedef struct attribute_group __no_const attribute_group_no_const;
94113
94114 /**
94115 * Use these macros to make defining attributes easier. See include/linux/device.h
94116@@ -137,7 +139,8 @@ struct bin_attribute {
94117 char *, loff_t, size_t);
94118 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
94119 struct vm_area_struct *vma);
94120-};
94121+} __do_const;
94122+typedef struct bin_attribute __no_const bin_attribute_no_const;
94123
94124 /**
94125 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
94126diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
94127index 387fa7d..3fcde6b 100644
94128--- a/include/linux/sysrq.h
94129+++ b/include/linux/sysrq.h
94130@@ -16,6 +16,7 @@
94131
94132 #include <linux/errno.h>
94133 #include <linux/types.h>
94134+#include <linux/compiler.h>
94135
94136 /* Possible values of bitmask for enabling sysrq functions */
94137 /* 0x0001 is reserved for enable everything */
94138@@ -33,7 +34,7 @@ struct sysrq_key_op {
94139 char *help_msg;
94140 char *action_msg;
94141 int enable_mask;
94142-};
94143+} __do_const;
94144
94145 #ifdef CONFIG_MAGIC_SYSRQ
94146
94147diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
94148index ff307b5..f1a4468 100644
94149--- a/include/linux/thread_info.h
94150+++ b/include/linux/thread_info.h
94151@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
94152 #error "no set_restore_sigmask() provided and default one won't work"
94153 #endif
94154
94155+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
94156+
94157+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
94158+{
94159+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
94160+}
94161+
94162 #endif /* __KERNEL__ */
94163
94164 #endif /* _LINUX_THREAD_INFO_H */
94165diff --git a/include/linux/tty.h b/include/linux/tty.h
94166index 358a337..8829c1f 100644
94167--- a/include/linux/tty.h
94168+++ b/include/linux/tty.h
94169@@ -225,7 +225,7 @@ struct tty_port {
94170 const struct tty_port_operations *ops; /* Port operations */
94171 spinlock_t lock; /* Lock protecting tty field */
94172 int blocked_open; /* Waiting to open */
94173- int count; /* Usage count */
94174+ atomic_t count; /* Usage count */
94175 wait_queue_head_t open_wait; /* Open waiters */
94176 wait_queue_head_t close_wait; /* Close waiters */
94177 wait_queue_head_t delta_msr_wait; /* Modem status change */
94178@@ -313,7 +313,7 @@ struct tty_struct {
94179 /* If the tty has a pending do_SAK, queue it here - akpm */
94180 struct work_struct SAK_work;
94181 struct tty_port *port;
94182-};
94183+} __randomize_layout;
94184
94185 /* Each of a tty's open files has private_data pointing to tty_file_private */
94186 struct tty_file_private {
94187@@ -572,7 +572,7 @@ extern int tty_port_open(struct tty_port *port,
94188 struct tty_struct *tty, struct file *filp);
94189 static inline int tty_port_users(struct tty_port *port)
94190 {
94191- return port->count + port->blocked_open;
94192+ return atomic_read(&port->count) + port->blocked_open;
94193 }
94194
94195 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
94196diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
94197index 92e337c..f46757b 100644
94198--- a/include/linux/tty_driver.h
94199+++ b/include/linux/tty_driver.h
94200@@ -291,7 +291,7 @@ struct tty_operations {
94201 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
94202 #endif
94203 const struct file_operations *proc_fops;
94204-};
94205+} __do_const __randomize_layout;
94206
94207 struct tty_driver {
94208 int magic; /* magic number for this structure */
94209@@ -325,7 +325,7 @@ struct tty_driver {
94210
94211 const struct tty_operations *ops;
94212 struct list_head tty_drivers;
94213-};
94214+} __randomize_layout;
94215
94216 extern struct list_head tty_drivers;
94217
94218diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
94219index 00c9d68..bc0188b 100644
94220--- a/include/linux/tty_ldisc.h
94221+++ b/include/linux/tty_ldisc.h
94222@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
94223
94224 struct module *owner;
94225
94226- int refcount;
94227+ atomic_t refcount;
94228 };
94229
94230 struct tty_ldisc {
94231diff --git a/include/linux/types.h b/include/linux/types.h
94232index 6747247..fc7ec8b 100644
94233--- a/include/linux/types.h
94234+++ b/include/linux/types.h
94235@@ -174,10 +174,26 @@ typedef struct {
94236 int counter;
94237 } atomic_t;
94238
94239+#ifdef CONFIG_PAX_REFCOUNT
94240+typedef struct {
94241+ int counter;
94242+} atomic_unchecked_t;
94243+#else
94244+typedef atomic_t atomic_unchecked_t;
94245+#endif
94246+
94247 #ifdef CONFIG_64BIT
94248 typedef struct {
94249 long counter;
94250 } atomic64_t;
94251+
94252+#ifdef CONFIG_PAX_REFCOUNT
94253+typedef struct {
94254+ long counter;
94255+} atomic64_unchecked_t;
94256+#else
94257+typedef atomic64_t atomic64_unchecked_t;
94258+#endif
94259 #endif
94260
94261 struct list_head {
94262diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
94263index ecd3319..8a36ded 100644
94264--- a/include/linux/uaccess.h
94265+++ b/include/linux/uaccess.h
94266@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
94267 long ret; \
94268 mm_segment_t old_fs = get_fs(); \
94269 \
94270- set_fs(KERNEL_DS); \
94271 pagefault_disable(); \
94272- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
94273- pagefault_enable(); \
94274+ set_fs(KERNEL_DS); \
94275+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
94276 set_fs(old_fs); \
94277+ pagefault_enable(); \
94278 ret; \
94279 })
94280
94281diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
94282index 2d1f9b6..d7a9fce 100644
94283--- a/include/linux/uidgid.h
94284+++ b/include/linux/uidgid.h
94285@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
94286
94287 #endif /* CONFIG_USER_NS */
94288
94289+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
94290+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
94291+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
94292+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
94293+
94294 #endif /* _LINUX_UIDGID_H */
94295diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
94296index 32c0e83..671eb35 100644
94297--- a/include/linux/uio_driver.h
94298+++ b/include/linux/uio_driver.h
94299@@ -67,7 +67,7 @@ struct uio_device {
94300 struct module *owner;
94301 struct device *dev;
94302 int minor;
94303- atomic_t event;
94304+ atomic_unchecked_t event;
94305 struct fasync_struct *async_queue;
94306 wait_queue_head_t wait;
94307 struct uio_info *info;
94308diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
94309index 99c1b4d..562e6f3 100644
94310--- a/include/linux/unaligned/access_ok.h
94311+++ b/include/linux/unaligned/access_ok.h
94312@@ -4,34 +4,34 @@
94313 #include <linux/kernel.h>
94314 #include <asm/byteorder.h>
94315
94316-static inline u16 get_unaligned_le16(const void *p)
94317+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
94318 {
94319- return le16_to_cpup((__le16 *)p);
94320+ return le16_to_cpup((const __le16 *)p);
94321 }
94322
94323-static inline u32 get_unaligned_le32(const void *p)
94324+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
94325 {
94326- return le32_to_cpup((__le32 *)p);
94327+ return le32_to_cpup((const __le32 *)p);
94328 }
94329
94330-static inline u64 get_unaligned_le64(const void *p)
94331+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
94332 {
94333- return le64_to_cpup((__le64 *)p);
94334+ return le64_to_cpup((const __le64 *)p);
94335 }
94336
94337-static inline u16 get_unaligned_be16(const void *p)
94338+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
94339 {
94340- return be16_to_cpup((__be16 *)p);
94341+ return be16_to_cpup((const __be16 *)p);
94342 }
94343
94344-static inline u32 get_unaligned_be32(const void *p)
94345+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
94346 {
94347- return be32_to_cpup((__be32 *)p);
94348+ return be32_to_cpup((const __be32 *)p);
94349 }
94350
94351-static inline u64 get_unaligned_be64(const void *p)
94352+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
94353 {
94354- return be64_to_cpup((__be64 *)p);
94355+ return be64_to_cpup((const __be64 *)p);
94356 }
94357
94358 static inline void put_unaligned_le16(u16 val, void *p)
94359diff --git a/include/linux/usb.h b/include/linux/usb.h
94360index 447fe29..9fc875f 100644
94361--- a/include/linux/usb.h
94362+++ b/include/linux/usb.h
94363@@ -592,7 +592,7 @@ struct usb_device {
94364 int maxchild;
94365
94366 u32 quirks;
94367- atomic_t urbnum;
94368+ atomic_unchecked_t urbnum;
94369
94370 unsigned long active_duration;
94371
94372@@ -1676,7 +1676,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
94373
94374 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
94375 __u8 request, __u8 requesttype, __u16 value, __u16 index,
94376- void *data, __u16 size, int timeout);
94377+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
94378 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
94379 void *data, int len, int *actual_length, int timeout);
94380 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
94381diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
94382index 9fd9e48..e2c5f35 100644
94383--- a/include/linux/usb/renesas_usbhs.h
94384+++ b/include/linux/usb/renesas_usbhs.h
94385@@ -39,7 +39,7 @@ enum {
94386 */
94387 struct renesas_usbhs_driver_callback {
94388 int (*notify_hotplug)(struct platform_device *pdev);
94389-};
94390+} __no_const;
94391
94392 /*
94393 * callback functions for platform
94394diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
94395index 8297e5b..0dfae27 100644
94396--- a/include/linux/user_namespace.h
94397+++ b/include/linux/user_namespace.h
94398@@ -39,7 +39,7 @@ struct user_namespace {
94399 struct key *persistent_keyring_register;
94400 struct rw_semaphore persistent_keyring_register_sem;
94401 #endif
94402-};
94403+} __randomize_layout;
94404
94405 extern struct user_namespace init_user_ns;
94406
94407diff --git a/include/linux/utsname.h b/include/linux/utsname.h
94408index 5093f58..c103e58 100644
94409--- a/include/linux/utsname.h
94410+++ b/include/linux/utsname.h
94411@@ -25,7 +25,7 @@ struct uts_namespace {
94412 struct new_utsname name;
94413 struct user_namespace *user_ns;
94414 struct ns_common ns;
94415-};
94416+} __randomize_layout;
94417 extern struct uts_namespace init_uts_ns;
94418
94419 #ifdef CONFIG_UTS_NS
94420diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
94421index 6f8fbcf..4efc177 100644
94422--- a/include/linux/vermagic.h
94423+++ b/include/linux/vermagic.h
94424@@ -25,9 +25,42 @@
94425 #define MODULE_ARCH_VERMAGIC ""
94426 #endif
94427
94428+#ifdef CONFIG_PAX_REFCOUNT
94429+#define MODULE_PAX_REFCOUNT "REFCOUNT "
94430+#else
94431+#define MODULE_PAX_REFCOUNT ""
94432+#endif
94433+
94434+#ifdef CONSTIFY_PLUGIN
94435+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
94436+#else
94437+#define MODULE_CONSTIFY_PLUGIN ""
94438+#endif
94439+
94440+#ifdef STACKLEAK_PLUGIN
94441+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
94442+#else
94443+#define MODULE_STACKLEAK_PLUGIN ""
94444+#endif
94445+
94446+#ifdef RANDSTRUCT_PLUGIN
94447+#include <generated/randomize_layout_hash.h>
94448+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
94449+#else
94450+#define MODULE_RANDSTRUCT_PLUGIN
94451+#endif
94452+
94453+#ifdef CONFIG_GRKERNSEC
94454+#define MODULE_GRSEC "GRSEC "
94455+#else
94456+#define MODULE_GRSEC ""
94457+#endif
94458+
94459 #define VERMAGIC_STRING \
94460 UTS_RELEASE " " \
94461 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
94462 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
94463- MODULE_ARCH_VERMAGIC
94464+ MODULE_ARCH_VERMAGIC \
94465+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
94466+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
94467
94468diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
94469index b483abd..af305ad 100644
94470--- a/include/linux/vga_switcheroo.h
94471+++ b/include/linux/vga_switcheroo.h
94472@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
94473
94474 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
94475
94476-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
94477+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
94478 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
94479-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
94480+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
94481 #else
94482
94483 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
94484@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
94485
94486 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
94487
94488-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
94489+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
94490 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
94491-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
94492+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
94493
94494 #endif
94495 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
94496diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
94497index 0ec5983..cc61051 100644
94498--- a/include/linux/vmalloc.h
94499+++ b/include/linux/vmalloc.h
94500@@ -18,6 +18,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
94501 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
94502 #define VM_NO_GUARD 0x00000040 /* don't add guard page */
94503 #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
94504+
94505+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
94506+#define VM_KERNEXEC 0x00000100 /* allocate from executable kernel memory range */
94507+#endif
94508+
94509 /* bits [20..32] reserved for arch specific ioremap internals */
94510
94511 /*
94512@@ -86,6 +91,10 @@ extern void *vmap(struct page **pages, unsigned int count,
94513 unsigned long flags, pgprot_t prot);
94514 extern void vunmap(const void *addr);
94515
94516+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
94517+extern void unmap_process_stacks(struct task_struct *task);
94518+#endif
94519+
94520 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
94521 unsigned long uaddr, void *kaddr,
94522 unsigned long size);
94523@@ -150,7 +159,7 @@ extern void free_vm_area(struct vm_struct *area);
94524
94525 /* for /dev/kmem */
94526 extern long vread(char *buf, char *addr, unsigned long count);
94527-extern long vwrite(char *buf, char *addr, unsigned long count);
94528+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
94529
94530 /*
94531 * Internals. Dont't use..
94532diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
94533index 82e7db7..f8ce3d0 100644
94534--- a/include/linux/vmstat.h
94535+++ b/include/linux/vmstat.h
94536@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
94537 /*
94538 * Zone based page accounting with per cpu differentials.
94539 */
94540-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
94541+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
94542
94543 static inline void zone_page_state_add(long x, struct zone *zone,
94544 enum zone_stat_item item)
94545 {
94546- atomic_long_add(x, &zone->vm_stat[item]);
94547- atomic_long_add(x, &vm_stat[item]);
94548+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
94549+ atomic_long_add_unchecked(x, &vm_stat[item]);
94550 }
94551
94552-static inline unsigned long global_page_state(enum zone_stat_item item)
94553+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
94554 {
94555- long x = atomic_long_read(&vm_stat[item]);
94556+ long x = atomic_long_read_unchecked(&vm_stat[item]);
94557 #ifdef CONFIG_SMP
94558 if (x < 0)
94559 x = 0;
94560@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
94561 return x;
94562 }
94563
94564-static inline unsigned long zone_page_state(struct zone *zone,
94565+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
94566 enum zone_stat_item item)
94567 {
94568- long x = atomic_long_read(&zone->vm_stat[item]);
94569+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
94570 #ifdef CONFIG_SMP
94571 if (x < 0)
94572 x = 0;
94573@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
94574 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
94575 enum zone_stat_item item)
94576 {
94577- long x = atomic_long_read(&zone->vm_stat[item]);
94578+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
94579
94580 #ifdef CONFIG_SMP
94581 int cpu;
94582@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
94583
94584 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
94585 {
94586- atomic_long_inc(&zone->vm_stat[item]);
94587- atomic_long_inc(&vm_stat[item]);
94588+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
94589+ atomic_long_inc_unchecked(&vm_stat[item]);
94590 }
94591
94592 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
94593 {
94594- atomic_long_dec(&zone->vm_stat[item]);
94595- atomic_long_dec(&vm_stat[item]);
94596+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
94597+ atomic_long_dec_unchecked(&vm_stat[item]);
94598 }
94599
94600 static inline void __inc_zone_page_state(struct page *page,
94601diff --git a/include/linux/xattr.h b/include/linux/xattr.h
94602index 91b0a68..0e9adf6 100644
94603--- a/include/linux/xattr.h
94604+++ b/include/linux/xattr.h
94605@@ -28,7 +28,7 @@ struct xattr_handler {
94606 size_t size, int handler_flags);
94607 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
94608 size_t size, int flags, int handler_flags);
94609-};
94610+} __do_const;
94611
94612 struct xattr {
94613 const char *name;
94614@@ -37,6 +37,9 @@ struct xattr {
94615 };
94616
94617 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
94618+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
94619+ssize_t pax_getxattr(struct dentry *, void *, size_t);
94620+#endif
94621 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
94622 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
94623 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
94624diff --git a/include/linux/zlib.h b/include/linux/zlib.h
94625index 92dbbd3..13ab0b3 100644
94626--- a/include/linux/zlib.h
94627+++ b/include/linux/zlib.h
94628@@ -31,6 +31,7 @@
94629 #define _ZLIB_H
94630
94631 #include <linux/zconf.h>
94632+#include <linux/compiler.h>
94633
94634 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
94635 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
94636@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
94637
94638 /* basic functions */
94639
94640-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
94641+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
94642 /*
94643 Returns the number of bytes that needs to be allocated for a per-
94644 stream workspace with the specified parameters. A pointer to this
94645diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
94646index 3e4fddf..5ec9104 100644
94647--- a/include/media/v4l2-dev.h
94648+++ b/include/media/v4l2-dev.h
94649@@ -75,7 +75,7 @@ struct v4l2_file_operations {
94650 int (*mmap) (struct file *, struct vm_area_struct *);
94651 int (*open) (struct file *);
94652 int (*release) (struct file *);
94653-};
94654+} __do_const;
94655
94656 /*
94657 * Newer version of video_device, handled by videodev2.c
94658diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
94659index ffb69da..040393e 100644
94660--- a/include/media/v4l2-device.h
94661+++ b/include/media/v4l2-device.h
94662@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
94663 this function returns 0. If the name ends with a digit (e.g. cx18),
94664 then the name will be set to cx18-0 since cx180 looks really odd. */
94665 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
94666- atomic_t *instance);
94667+ atomic_unchecked_t *instance);
94668
94669 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
94670 Since the parent disappears this ensures that v4l2_dev doesn't have an
94671diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
94672index 2a25dec..bf6dd8a 100644
94673--- a/include/net/9p/transport.h
94674+++ b/include/net/9p/transport.h
94675@@ -62,7 +62,7 @@ struct p9_trans_module {
94676 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
94677 int (*zc_request)(struct p9_client *, struct p9_req_t *,
94678 char *, char *, int , int, int, int);
94679-};
94680+} __do_const;
94681
94682 void v9fs_register_trans(struct p9_trans_module *m);
94683 void v9fs_unregister_trans(struct p9_trans_module *m);
94684diff --git a/include/net/af_unix.h b/include/net/af_unix.h
94685index a175ba4..196eb8242 100644
94686--- a/include/net/af_unix.h
94687+++ b/include/net/af_unix.h
94688@@ -36,7 +36,7 @@ struct unix_skb_parms {
94689 u32 secid; /* Security ID */
94690 #endif
94691 u32 consumed;
94692-};
94693+} __randomize_layout;
94694
94695 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
94696 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
94697diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
94698index 2239a37..a83461f 100644
94699--- a/include/net/bluetooth/l2cap.h
94700+++ b/include/net/bluetooth/l2cap.h
94701@@ -609,7 +609,7 @@ struct l2cap_ops {
94702 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
94703 unsigned long hdr_len,
94704 unsigned long len, int nb);
94705-};
94706+} __do_const;
94707
94708 struct l2cap_conn {
94709 struct hci_conn *hcon;
94710diff --git a/include/net/bonding.h b/include/net/bonding.h
94711index fda6fee..dbdf83c 100644
94712--- a/include/net/bonding.h
94713+++ b/include/net/bonding.h
94714@@ -665,7 +665,7 @@ extern struct rtnl_link_ops bond_link_ops;
94715
94716 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
94717 {
94718- atomic_long_inc(&dev->tx_dropped);
94719+ atomic_long_inc_unchecked(&dev->tx_dropped);
94720 dev_kfree_skb_any(skb);
94721 }
94722
94723diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
94724index f2ae33d..c457cf0 100644
94725--- a/include/net/caif/cfctrl.h
94726+++ b/include/net/caif/cfctrl.h
94727@@ -52,7 +52,7 @@ struct cfctrl_rsp {
94728 void (*radioset_rsp)(void);
94729 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
94730 struct cflayer *client_layer);
94731-};
94732+} __no_const;
94733
94734 /* Link Setup Parameters for CAIF-Links. */
94735 struct cfctrl_link_param {
94736@@ -101,8 +101,8 @@ struct cfctrl_request_info {
94737 struct cfctrl {
94738 struct cfsrvl serv;
94739 struct cfctrl_rsp res;
94740- atomic_t req_seq_no;
94741- atomic_t rsp_seq_no;
94742+ atomic_unchecked_t req_seq_no;
94743+ atomic_unchecked_t rsp_seq_no;
94744 struct list_head list;
94745 /* Protects from simultaneous access to first_req list */
94746 spinlock_t info_list_lock;
94747diff --git a/include/net/flow.h b/include/net/flow.h
94748index 8109a15..504466d 100644
94749--- a/include/net/flow.h
94750+++ b/include/net/flow.h
94751@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
94752
94753 void flow_cache_flush(struct net *net);
94754 void flow_cache_flush_deferred(struct net *net);
94755-extern atomic_t flow_cache_genid;
94756+extern atomic_unchecked_t flow_cache_genid;
94757
94758 #endif
94759diff --git a/include/net/genetlink.h b/include/net/genetlink.h
94760index 0574abd..0f16881 100644
94761--- a/include/net/genetlink.h
94762+++ b/include/net/genetlink.h
94763@@ -130,7 +130,7 @@ struct genl_ops {
94764 u8 cmd;
94765 u8 internal_flags;
94766 u8 flags;
94767-};
94768+} __do_const;
94769
94770 int __genl_register_family(struct genl_family *family);
94771
94772diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
94773index 0f712c0..cd762c4 100644
94774--- a/include/net/gro_cells.h
94775+++ b/include/net/gro_cells.h
94776@@ -27,7 +27,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
94777 cell = this_cpu_ptr(gcells->cells);
94778
94779 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
94780- atomic_long_inc(&dev->rx_dropped);
94781+ atomic_long_inc_unchecked(&dev->rx_dropped);
94782 kfree_skb(skb);
94783 return;
94784 }
94785diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
94786index 5976bde..3a81660 100644
94787--- a/include/net/inet_connection_sock.h
94788+++ b/include/net/inet_connection_sock.h
94789@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
94790 int (*bind_conflict)(const struct sock *sk,
94791 const struct inet_bind_bucket *tb, bool relax);
94792 void (*mtu_reduced)(struct sock *sk);
94793-};
94794+} __do_const;
94795
94796 /** inet_connection_sock - INET connection oriented sock
94797 *
94798diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
94799index 80479ab..0c3f647 100644
94800--- a/include/net/inetpeer.h
94801+++ b/include/net/inetpeer.h
94802@@ -47,7 +47,7 @@ struct inet_peer {
94803 */
94804 union {
94805 struct {
94806- atomic_t rid; /* Frag reception counter */
94807+ atomic_unchecked_t rid; /* Frag reception counter */
94808 };
94809 struct rcu_head rcu;
94810 struct inet_peer *gc_next;
94811diff --git a/include/net/ip.h b/include/net/ip.h
94812index 6cc1eaf..14059b0 100644
94813--- a/include/net/ip.h
94814+++ b/include/net/ip.h
94815@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
94816 }
94817 }
94818
94819-u32 ip_idents_reserve(u32 hash, int segs);
94820+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
94821 void __ip_select_ident(struct iphdr *iph, int segs);
94822
94823 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
94824diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
94825index 5bd120e4..03fb812 100644
94826--- a/include/net/ip_fib.h
94827+++ b/include/net/ip_fib.h
94828@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
94829
94830 #define FIB_RES_SADDR(net, res) \
94831 ((FIB_RES_NH(res).nh_saddr_genid == \
94832- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
94833+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
94834 FIB_RES_NH(res).nh_saddr : \
94835 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
94836 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
94837diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
94838index 615b20b..fd4cbd8 100644
94839--- a/include/net/ip_vs.h
94840+++ b/include/net/ip_vs.h
94841@@ -534,7 +534,7 @@ struct ip_vs_conn {
94842 struct ip_vs_conn *control; /* Master control connection */
94843 atomic_t n_control; /* Number of controlled ones */
94844 struct ip_vs_dest *dest; /* real server */
94845- atomic_t in_pkts; /* incoming packet counter */
94846+ atomic_unchecked_t in_pkts; /* incoming packet counter */
94847
94848 /* Packet transmitter for different forwarding methods. If it
94849 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
94850@@ -682,7 +682,7 @@ struct ip_vs_dest {
94851 __be16 port; /* port number of the server */
94852 union nf_inet_addr addr; /* IP address of the server */
94853 volatile unsigned int flags; /* dest status flags */
94854- atomic_t conn_flags; /* flags to copy to conn */
94855+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
94856 atomic_t weight; /* server weight */
94857
94858 atomic_t refcnt; /* reference counter */
94859@@ -928,11 +928,11 @@ struct netns_ipvs {
94860 /* ip_vs_lblc */
94861 int sysctl_lblc_expiration;
94862 struct ctl_table_header *lblc_ctl_header;
94863- struct ctl_table *lblc_ctl_table;
94864+ ctl_table_no_const *lblc_ctl_table;
94865 /* ip_vs_lblcr */
94866 int sysctl_lblcr_expiration;
94867 struct ctl_table_header *lblcr_ctl_header;
94868- struct ctl_table *lblcr_ctl_table;
94869+ ctl_table_no_const *lblcr_ctl_table;
94870 /* ip_vs_est */
94871 struct list_head est_list; /* estimator list */
94872 spinlock_t est_lock;
94873diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
94874index 8d4f588..2e37ad2 100644
94875--- a/include/net/irda/ircomm_tty.h
94876+++ b/include/net/irda/ircomm_tty.h
94877@@ -33,6 +33,7 @@
94878 #include <linux/termios.h>
94879 #include <linux/timer.h>
94880 #include <linux/tty.h> /* struct tty_struct */
94881+#include <asm/local.h>
94882
94883 #include <net/irda/irias_object.h>
94884 #include <net/irda/ircomm_core.h>
94885diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
94886index 714cc9a..ea05f3e 100644
94887--- a/include/net/iucv/af_iucv.h
94888+++ b/include/net/iucv/af_iucv.h
94889@@ -149,7 +149,7 @@ struct iucv_skb_cb {
94890 struct iucv_sock_list {
94891 struct hlist_head head;
94892 rwlock_t lock;
94893- atomic_t autobind_name;
94894+ atomic_unchecked_t autobind_name;
94895 };
94896
94897 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
94898diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
94899index f3be818..bf46196 100644
94900--- a/include/net/llc_c_ac.h
94901+++ b/include/net/llc_c_ac.h
94902@@ -87,7 +87,7 @@
94903 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
94904 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
94905
94906-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
94907+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
94908
94909 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
94910 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
94911diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
94912index 3948cf1..83b28c4 100644
94913--- a/include/net/llc_c_ev.h
94914+++ b/include/net/llc_c_ev.h
94915@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
94916 return (struct llc_conn_state_ev *)skb->cb;
94917 }
94918
94919-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
94920-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
94921+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
94922+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
94923
94924 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
94925 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
94926diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
94927index 48f3f89..0e92c50 100644
94928--- a/include/net/llc_c_st.h
94929+++ b/include/net/llc_c_st.h
94930@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
94931 u8 next_state;
94932 const llc_conn_ev_qfyr_t *ev_qualifiers;
94933 const llc_conn_action_t *ev_actions;
94934-};
94935+} __do_const;
94936
94937 struct llc_conn_state {
94938 u8 current_state;
94939diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
94940index a61b98c..aade1eb 100644
94941--- a/include/net/llc_s_ac.h
94942+++ b/include/net/llc_s_ac.h
94943@@ -23,7 +23,7 @@
94944 #define SAP_ACT_TEST_IND 9
94945
94946 /* All action functions must look like this */
94947-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
94948+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
94949
94950 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
94951 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
94952diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
94953index c4359e2..76dbc4a 100644
94954--- a/include/net/llc_s_st.h
94955+++ b/include/net/llc_s_st.h
94956@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
94957 llc_sap_ev_t ev;
94958 u8 next_state;
94959 const llc_sap_action_t *ev_actions;
94960-};
94961+} __do_const;
94962
94963 struct llc_sap_state {
94964 u8 curr_state;
94965diff --git a/include/net/mac80211.h b/include/net/mac80211.h
94966index d52914b..2b13cec 100644
94967--- a/include/net/mac80211.h
94968+++ b/include/net/mac80211.h
94969@@ -4915,7 +4915,7 @@ struct rate_control_ops {
94970 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
94971
94972 u32 (*get_expected_throughput)(void *priv_sta);
94973-};
94974+} __do_const;
94975
94976 static inline int rate_supported(struct ieee80211_sta *sta,
94977 enum ieee80211_band band,
94978diff --git a/include/net/neighbour.h b/include/net/neighbour.h
94979index 76f7084..8f36e39 100644
94980--- a/include/net/neighbour.h
94981+++ b/include/net/neighbour.h
94982@@ -163,7 +163,7 @@ struct neigh_ops {
94983 void (*error_report)(struct neighbour *, struct sk_buff *);
94984 int (*output)(struct neighbour *, struct sk_buff *);
94985 int (*connected_output)(struct neighbour *, struct sk_buff *);
94986-};
94987+} __do_const;
94988
94989 struct pneigh_entry {
94990 struct pneigh_entry *next;
94991@@ -217,7 +217,7 @@ struct neigh_table {
94992 struct neigh_statistics __percpu *stats;
94993 struct neigh_hash_table __rcu *nht;
94994 struct pneigh_entry **phash_buckets;
94995-};
94996+} __randomize_layout;
94997
94998 enum {
94999 NEIGH_ARP_TABLE = 0,
95000diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
95001index 36faf49..6927638 100644
95002--- a/include/net/net_namespace.h
95003+++ b/include/net/net_namespace.h
95004@@ -131,8 +131,8 @@ struct net {
95005 struct netns_ipvs *ipvs;
95006 #endif
95007 struct sock *diag_nlsk;
95008- atomic_t fnhe_genid;
95009-};
95010+ atomic_unchecked_t fnhe_genid;
95011+} __randomize_layout;
95012
95013 #include <linux/seq_file_net.h>
95014
95015@@ -288,7 +288,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
95016 #define __net_init __init
95017 #define __net_exit __exit_refok
95018 #define __net_initdata __initdata
95019+#ifdef CONSTIFY_PLUGIN
95020 #define __net_initconst __initconst
95021+#else
95022+#define __net_initconst __initdata
95023+#endif
95024 #endif
95025
95026 int peernet2id(struct net *net, struct net *peer);
95027@@ -301,7 +305,7 @@ struct pernet_operations {
95028 void (*exit_batch)(struct list_head *net_exit_list);
95029 int *id;
95030 size_t size;
95031-};
95032+} __do_const;
95033
95034 /*
95035 * Use these carefully. If you implement a network device and it
95036@@ -349,12 +353,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
95037
95038 static inline int rt_genid_ipv4(struct net *net)
95039 {
95040- return atomic_read(&net->ipv4.rt_genid);
95041+ return atomic_read_unchecked(&net->ipv4.rt_genid);
95042 }
95043
95044 static inline void rt_genid_bump_ipv4(struct net *net)
95045 {
95046- atomic_inc(&net->ipv4.rt_genid);
95047+ atomic_inc_unchecked(&net->ipv4.rt_genid);
95048 }
95049
95050 extern void (*__fib6_flush_trees)(struct net *net);
95051@@ -381,12 +385,12 @@ static inline void rt_genid_bump_all(struct net *net)
95052
95053 static inline int fnhe_genid(struct net *net)
95054 {
95055- return atomic_read(&net->fnhe_genid);
95056+ return atomic_read_unchecked(&net->fnhe_genid);
95057 }
95058
95059 static inline void fnhe_genid_bump(struct net *net)
95060 {
95061- atomic_inc(&net->fnhe_genid);
95062+ atomic_inc_unchecked(&net->fnhe_genid);
95063 }
95064
95065 #endif /* __NET_NET_NAMESPACE_H */
95066diff --git a/include/net/netlink.h b/include/net/netlink.h
95067index e010ee8..405b9f4 100644
95068--- a/include/net/netlink.h
95069+++ b/include/net/netlink.h
95070@@ -518,7 +518,7 @@ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
95071 {
95072 if (mark) {
95073 WARN_ON((unsigned char *) mark < skb->data);
95074- skb_trim(skb, (unsigned char *) mark - skb->data);
95075+ skb_trim(skb, (const unsigned char *) mark - skb->data);
95076 }
95077 }
95078
95079diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
95080index 29d6a94..235d3d84 100644
95081--- a/include/net/netns/conntrack.h
95082+++ b/include/net/netns/conntrack.h
95083@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
95084 struct nf_proto_net {
95085 #ifdef CONFIG_SYSCTL
95086 struct ctl_table_header *ctl_table_header;
95087- struct ctl_table *ctl_table;
95088+ ctl_table_no_const *ctl_table;
95089 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
95090 struct ctl_table_header *ctl_compat_header;
95091- struct ctl_table *ctl_compat_table;
95092+ ctl_table_no_const *ctl_compat_table;
95093 #endif
95094 #endif
95095 unsigned int users;
95096@@ -60,7 +60,7 @@ struct nf_ip_net {
95097 struct nf_icmp_net icmpv6;
95098 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
95099 struct ctl_table_header *ctl_table_header;
95100- struct ctl_table *ctl_table;
95101+ ctl_table_no_const *ctl_table;
95102 #endif
95103 };
95104
95105diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
95106index dbe2254..ed0c151 100644
95107--- a/include/net/netns/ipv4.h
95108+++ b/include/net/netns/ipv4.h
95109@@ -87,7 +87,7 @@ struct netns_ipv4 {
95110
95111 struct ping_group_range ping_group_range;
95112
95113- atomic_t dev_addr_genid;
95114+ atomic_unchecked_t dev_addr_genid;
95115
95116 #ifdef CONFIG_SYSCTL
95117 unsigned long *sysctl_local_reserved_ports;
95118@@ -101,6 +101,6 @@ struct netns_ipv4 {
95119 struct fib_rules_ops *mr_rules_ops;
95120 #endif
95121 #endif
95122- atomic_t rt_genid;
95123+ atomic_unchecked_t rt_genid;
95124 };
95125 #endif
95126diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
95127index 69ae41f..4f94868 100644
95128--- a/include/net/netns/ipv6.h
95129+++ b/include/net/netns/ipv6.h
95130@@ -75,8 +75,8 @@ struct netns_ipv6 {
95131 struct fib_rules_ops *mr6_rules_ops;
95132 #endif
95133 #endif
95134- atomic_t dev_addr_genid;
95135- atomic_t fib6_sernum;
95136+ atomic_unchecked_t dev_addr_genid;
95137+ atomic_unchecked_t fib6_sernum;
95138 };
95139
95140 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
95141diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
95142index 730d82a..045f2c4 100644
95143--- a/include/net/netns/xfrm.h
95144+++ b/include/net/netns/xfrm.h
95145@@ -78,7 +78,7 @@ struct netns_xfrm {
95146
95147 /* flow cache part */
95148 struct flow_cache flow_cache_global;
95149- atomic_t flow_cache_genid;
95150+ atomic_unchecked_t flow_cache_genid;
95151 struct list_head flow_cache_gc_list;
95152 spinlock_t flow_cache_gc_lock;
95153 struct work_struct flow_cache_gc_work;
95154diff --git a/include/net/ping.h b/include/net/ping.h
95155index cc16d41..664f40b 100644
95156--- a/include/net/ping.h
95157+++ b/include/net/ping.h
95158@@ -54,7 +54,7 @@ struct ping_iter_state {
95159
95160 extern struct proto ping_prot;
95161 #if IS_ENABLED(CONFIG_IPV6)
95162-extern struct pingv6_ops pingv6_ops;
95163+extern struct pingv6_ops *pingv6_ops;
95164 #endif
95165
95166 struct pingfakehdr {
95167diff --git a/include/net/protocol.h b/include/net/protocol.h
95168index d6fcc1f..ca277058 100644
95169--- a/include/net/protocol.h
95170+++ b/include/net/protocol.h
95171@@ -49,7 +49,7 @@ struct net_protocol {
95172 * socket lookup?
95173 */
95174 icmp_strict_tag_validation:1;
95175-};
95176+} __do_const;
95177
95178 #if IS_ENABLED(CONFIG_IPV6)
95179 struct inet6_protocol {
95180@@ -62,7 +62,7 @@ struct inet6_protocol {
95181 u8 type, u8 code, int offset,
95182 __be32 info);
95183 unsigned int flags; /* INET6_PROTO_xxx */
95184-};
95185+} __do_const;
95186
95187 #define INET6_PROTO_NOPOLICY 0x1
95188 #define INET6_PROTO_FINAL 0x2
95189diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
95190index 6c6d539..af70817 100644
95191--- a/include/net/rtnetlink.h
95192+++ b/include/net/rtnetlink.h
95193@@ -95,7 +95,7 @@ struct rtnl_link_ops {
95194 const struct net_device *dev,
95195 const struct net_device *slave_dev);
95196 struct net *(*get_link_net)(const struct net_device *dev);
95197-};
95198+} __do_const;
95199
95200 int __rtnl_link_register(struct rtnl_link_ops *ops);
95201 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
95202diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
95203index 4a5b9a3..ca27d73 100644
95204--- a/include/net/sctp/checksum.h
95205+++ b/include/net/sctp/checksum.h
95206@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
95207 unsigned int offset)
95208 {
95209 struct sctphdr *sh = sctp_hdr(skb);
95210- __le32 ret, old = sh->checksum;
95211- const struct skb_checksum_ops ops = {
95212+ __le32 ret, old = sh->checksum;
95213+ static const struct skb_checksum_ops ops = {
95214 .update = sctp_csum_update,
95215 .combine = sctp_csum_combine,
95216 };
95217diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
95218index 487ef34..d457f98 100644
95219--- a/include/net/sctp/sm.h
95220+++ b/include/net/sctp/sm.h
95221@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
95222 typedef struct {
95223 sctp_state_fn_t *fn;
95224 const char *name;
95225-} sctp_sm_table_entry_t;
95226+} __do_const sctp_sm_table_entry_t;
95227
95228 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
95229 * currently in use.
95230@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
95231 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
95232
95233 /* Extern declarations for major data structures. */
95234-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
95235+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
95236
95237
95238 /* Get the size of a DATA chunk payload. */
95239diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
95240index 2bb2fcf..d17c291 100644
95241--- a/include/net/sctp/structs.h
95242+++ b/include/net/sctp/structs.h
95243@@ -509,7 +509,7 @@ struct sctp_pf {
95244 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
95245 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
95246 struct sctp_af *af;
95247-};
95248+} __do_const;
95249
95250
95251 /* Structure to track chunk fragments that have been acked, but peer
95252diff --git a/include/net/sock.h b/include/net/sock.h
95253index e4079c2..79c5d3a 100644
95254--- a/include/net/sock.h
95255+++ b/include/net/sock.h
95256@@ -362,7 +362,7 @@ struct sock {
95257 unsigned int sk_napi_id;
95258 unsigned int sk_ll_usec;
95259 #endif
95260- atomic_t sk_drops;
95261+ atomic_unchecked_t sk_drops;
95262 int sk_rcvbuf;
95263
95264 struct sk_filter __rcu *sk_filter;
95265@@ -1039,7 +1039,7 @@ struct proto {
95266 void (*destroy_cgroup)(struct mem_cgroup *memcg);
95267 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
95268 #endif
95269-};
95270+} __randomize_layout;
95271
95272 /*
95273 * Bits in struct cg_proto.flags
95274@@ -1212,7 +1212,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
95275 page_counter_uncharge(&prot->memory_allocated, amt);
95276 }
95277
95278-static inline long
95279+static inline long __intentional_overflow(-1)
95280 sk_memory_allocated(const struct sock *sk)
95281 {
95282 struct proto *prot = sk->sk_prot;
95283@@ -1778,7 +1778,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
95284 }
95285
95286 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
95287- struct iov_iter *from, char *to,
95288+ struct iov_iter *from, unsigned char *to,
95289 int copy, int offset)
95290 {
95291 if (skb->ip_summed == CHECKSUM_NONE) {
95292@@ -2025,7 +2025,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
95293 }
95294 }
95295
95296-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
95297+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
95298
95299 /**
95300 * sk_page_frag - return an appropriate page_frag
95301diff --git a/include/net/tcp.h b/include/net/tcp.h
95302index 8d6b983..5813205 100644
95303--- a/include/net/tcp.h
95304+++ b/include/net/tcp.h
95305@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
95306 void tcp_xmit_retransmit_queue(struct sock *);
95307 void tcp_simple_retransmit(struct sock *);
95308 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
95309-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
95310+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
95311
95312 void tcp_send_probe0(struct sock *);
95313 void tcp_send_partial(struct sock *);
95314@@ -694,8 +694,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
95315 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
95316 */
95317 struct tcp_skb_cb {
95318- __u32 seq; /* Starting sequence number */
95319- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
95320+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
95321+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
95322 union {
95323 /* Note : tcp_tw_isn is used in input path only
95324 * (isn chosen by tcp_timewait_state_process())
95325@@ -720,7 +720,7 @@ struct tcp_skb_cb {
95326
95327 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
95328 /* 1 byte hole */
95329- __u32 ack_seq; /* Sequence number ACK'd */
95330+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
95331 union {
95332 struct inet_skb_parm h4;
95333 #if IS_ENABLED(CONFIG_IPV6)
95334diff --git a/include/net/xfrm.h b/include/net/xfrm.h
95335index dc4865e..152ee4c 100644
95336--- a/include/net/xfrm.h
95337+++ b/include/net/xfrm.h
95338@@ -285,7 +285,6 @@ struct xfrm_dst;
95339 struct xfrm_policy_afinfo {
95340 unsigned short family;
95341 struct dst_ops *dst_ops;
95342- void (*garbage_collect)(struct net *net);
95343 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
95344 const xfrm_address_t *saddr,
95345 const xfrm_address_t *daddr);
95346@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
95347 struct net_device *dev,
95348 const struct flowi *fl);
95349 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
95350-};
95351+} __do_const;
95352
95353 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
95354 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
95355@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
95356 int (*transport_finish)(struct sk_buff *skb,
95357 int async);
95358 void (*local_error)(struct sk_buff *skb, u32 mtu);
95359-};
95360+} __do_const;
95361
95362 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
95363 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
95364@@ -437,7 +436,7 @@ struct xfrm_mode {
95365 struct module *owner;
95366 unsigned int encap;
95367 int flags;
95368-};
95369+} __do_const;
95370
95371 /* Flags for xfrm_mode. */
95372 enum {
95373@@ -534,7 +533,7 @@ struct xfrm_policy {
95374 struct timer_list timer;
95375
95376 struct flow_cache_object flo;
95377- atomic_t genid;
95378+ atomic_unchecked_t genid;
95379 u32 priority;
95380 u32 index;
95381 struct xfrm_mark mark;
95382@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
95383 }
95384
95385 void xfrm_garbage_collect(struct net *net);
95386+void xfrm_garbage_collect_deferred(struct net *net);
95387
95388 #else
95389
95390@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
95391 static inline void xfrm_garbage_collect(struct net *net)
95392 {
95393 }
95394+static inline void xfrm_garbage_collect_deferred(struct net *net)
95395+{
95396+}
95397 #endif
95398
95399 static __inline__
95400diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
95401index 1017e0b..227aa4d 100644
95402--- a/include/rdma/iw_cm.h
95403+++ b/include/rdma/iw_cm.h
95404@@ -122,7 +122,7 @@ struct iw_cm_verbs {
95405 int backlog);
95406
95407 int (*destroy_listen)(struct iw_cm_id *cm_id);
95408-};
95409+} __no_const;
95410
95411 /**
95412 * iw_create_cm_id - Create an IW CM identifier.
95413diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
95414index 93d14da..734b3d8 100644
95415--- a/include/scsi/libfc.h
95416+++ b/include/scsi/libfc.h
95417@@ -771,6 +771,7 @@ struct libfc_function_template {
95418 */
95419 void (*disc_stop_final) (struct fc_lport *);
95420 };
95421+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
95422
95423 /**
95424 * struct fc_disc - Discovery context
95425@@ -875,7 +876,7 @@ struct fc_lport {
95426 struct fc_vport *vport;
95427
95428 /* Operational Information */
95429- struct libfc_function_template tt;
95430+ libfc_function_template_no_const tt;
95431 u8 link_up;
95432 u8 qfull;
95433 enum fc_lport_state state;
95434diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
95435index a4c9336..d6f8f34 100644
95436--- a/include/scsi/scsi_device.h
95437+++ b/include/scsi/scsi_device.h
95438@@ -185,9 +185,9 @@ struct scsi_device {
95439 unsigned int max_device_blocked; /* what device_blocked counts down from */
95440 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
95441
95442- atomic_t iorequest_cnt;
95443- atomic_t iodone_cnt;
95444- atomic_t ioerr_cnt;
95445+ atomic_unchecked_t iorequest_cnt;
95446+ atomic_unchecked_t iodone_cnt;
95447+ atomic_unchecked_t ioerr_cnt;
95448
95449 struct device sdev_gendev,
95450 sdev_dev;
95451diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
95452index 007a0bc..7188db8 100644
95453--- a/include/scsi/scsi_transport_fc.h
95454+++ b/include/scsi/scsi_transport_fc.h
95455@@ -756,7 +756,8 @@ struct fc_function_template {
95456 unsigned long show_host_system_hostname:1;
95457
95458 unsigned long disable_target_scan:1;
95459-};
95460+} __do_const;
95461+typedef struct fc_function_template __no_const fc_function_template_no_const;
95462
95463
95464 /**
95465diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
95466index f48089d..73abe48 100644
95467--- a/include/sound/compress_driver.h
95468+++ b/include/sound/compress_driver.h
95469@@ -130,7 +130,7 @@ struct snd_compr_ops {
95470 struct snd_compr_caps *caps);
95471 int (*get_codec_caps) (struct snd_compr_stream *stream,
95472 struct snd_compr_codec_caps *codec);
95473-};
95474+} __no_const;
95475
95476 /**
95477 * struct snd_compr: Compressed device
95478diff --git a/include/sound/soc.h b/include/sound/soc.h
95479index 0d1ade1..34e77d3 100644
95480--- a/include/sound/soc.h
95481+++ b/include/sound/soc.h
95482@@ -856,7 +856,7 @@ struct snd_soc_codec_driver {
95483 enum snd_soc_dapm_type, int);
95484
95485 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
95486-};
95487+} __do_const;
95488
95489 /* SoC platform interface */
95490 struct snd_soc_platform_driver {
95491@@ -883,7 +883,7 @@ struct snd_soc_platform_driver {
95492 const struct snd_compr_ops *compr_ops;
95493
95494 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
95495-};
95496+} __do_const;
95497
95498 struct snd_soc_dai_link_component {
95499 const char *name;
95500diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
95501index 985ca4c..b55b54a 100644
95502--- a/include/target/target_core_base.h
95503+++ b/include/target/target_core_base.h
95504@@ -767,7 +767,7 @@ struct se_device {
95505 atomic_long_t write_bytes;
95506 /* Active commands on this virtual SE device */
95507 atomic_t simple_cmds;
95508- atomic_t dev_ordered_id;
95509+ atomic_unchecked_t dev_ordered_id;
95510 atomic_t dev_ordered_sync;
95511 atomic_t dev_qf_count;
95512 int export_count;
95513diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
95514new file mode 100644
95515index 0000000..fb634b7
95516--- /dev/null
95517+++ b/include/trace/events/fs.h
95518@@ -0,0 +1,53 @@
95519+#undef TRACE_SYSTEM
95520+#define TRACE_SYSTEM fs
95521+
95522+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
95523+#define _TRACE_FS_H
95524+
95525+#include <linux/fs.h>
95526+#include <linux/tracepoint.h>
95527+
95528+TRACE_EVENT(do_sys_open,
95529+
95530+ TP_PROTO(const char *filename, int flags, int mode),
95531+
95532+ TP_ARGS(filename, flags, mode),
95533+
95534+ TP_STRUCT__entry(
95535+ __string( filename, filename )
95536+ __field( int, flags )
95537+ __field( int, mode )
95538+ ),
95539+
95540+ TP_fast_assign(
95541+ __assign_str(filename, filename);
95542+ __entry->flags = flags;
95543+ __entry->mode = mode;
95544+ ),
95545+
95546+ TP_printk("\"%s\" %x %o",
95547+ __get_str(filename), __entry->flags, __entry->mode)
95548+);
95549+
95550+TRACE_EVENT(open_exec,
95551+
95552+ TP_PROTO(const char *filename),
95553+
95554+ TP_ARGS(filename),
95555+
95556+ TP_STRUCT__entry(
95557+ __string( filename, filename )
95558+ ),
95559+
95560+ TP_fast_assign(
95561+ __assign_str(filename, filename);
95562+ ),
95563+
95564+ TP_printk("\"%s\"",
95565+ __get_str(filename))
95566+);
95567+
95568+#endif /* _TRACE_FS_H */
95569+
95570+/* This part must be outside protection */
95571+#include <trace/define_trace.h>
95572diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
95573index 3608beb..df39d8a 100644
95574--- a/include/trace/events/irq.h
95575+++ b/include/trace/events/irq.h
95576@@ -36,7 +36,7 @@ struct softirq_action;
95577 */
95578 TRACE_EVENT(irq_handler_entry,
95579
95580- TP_PROTO(int irq, struct irqaction *action),
95581+ TP_PROTO(int irq, const struct irqaction *action),
95582
95583 TP_ARGS(irq, action),
95584
95585@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
95586 */
95587 TRACE_EVENT(irq_handler_exit,
95588
95589- TP_PROTO(int irq, struct irqaction *action, int ret),
95590+ TP_PROTO(int irq, const struct irqaction *action, int ret),
95591
95592 TP_ARGS(irq, action, ret),
95593
95594diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
95595index 6eed16b..3e05750 100644
95596--- a/include/uapi/drm/i915_drm.h
95597+++ b/include/uapi/drm/i915_drm.h
95598@@ -347,6 +347,7 @@ typedef struct drm_i915_irq_wait {
95599 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
95600 #define I915_PARAM_MMAP_VERSION 30
95601 #define I915_PARAM_HAS_BSD2 31
95602+#define I915_PARAM_HAS_LEGACY_CONTEXT 35
95603
95604 typedef struct drm_i915_getparam {
95605 int param;
95606diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
95607index 7caf44c..23c6f27 100644
95608--- a/include/uapi/linux/a.out.h
95609+++ b/include/uapi/linux/a.out.h
95610@@ -39,6 +39,14 @@ enum machine_type {
95611 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
95612 };
95613
95614+/* Constants for the N_FLAGS field */
95615+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
95616+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
95617+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
95618+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
95619+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
95620+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
95621+
95622 #if !defined (N_MAGIC)
95623 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
95624 #endif
95625diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
95626index 22b6ad3..aeba37e 100644
95627--- a/include/uapi/linux/bcache.h
95628+++ b/include/uapi/linux/bcache.h
95629@@ -5,6 +5,7 @@
95630 * Bcache on disk data structures
95631 */
95632
95633+#include <linux/compiler.h>
95634 #include <asm/types.h>
95635
95636 #define BITMASK(name, type, field, offset, size) \
95637@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
95638 /* Btree keys - all units are in sectors */
95639
95640 struct bkey {
95641- __u64 high;
95642- __u64 low;
95643+ __u64 high __intentional_overflow(-1);
95644+ __u64 low __intentional_overflow(-1);
95645 __u64 ptr[];
95646 };
95647
95648diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
95649index d876736..ccce5c0 100644
95650--- a/include/uapi/linux/byteorder/little_endian.h
95651+++ b/include/uapi/linux/byteorder/little_endian.h
95652@@ -42,51 +42,51 @@
95653
95654 static inline __le64 __cpu_to_le64p(const __u64 *p)
95655 {
95656- return (__force __le64)*p;
95657+ return (__force const __le64)*p;
95658 }
95659-static inline __u64 __le64_to_cpup(const __le64 *p)
95660+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
95661 {
95662- return (__force __u64)*p;
95663+ return (__force const __u64)*p;
95664 }
95665 static inline __le32 __cpu_to_le32p(const __u32 *p)
95666 {
95667- return (__force __le32)*p;
95668+ return (__force const __le32)*p;
95669 }
95670 static inline __u32 __le32_to_cpup(const __le32 *p)
95671 {
95672- return (__force __u32)*p;
95673+ return (__force const __u32)*p;
95674 }
95675 static inline __le16 __cpu_to_le16p(const __u16 *p)
95676 {
95677- return (__force __le16)*p;
95678+ return (__force const __le16)*p;
95679 }
95680 static inline __u16 __le16_to_cpup(const __le16 *p)
95681 {
95682- return (__force __u16)*p;
95683+ return (__force const __u16)*p;
95684 }
95685 static inline __be64 __cpu_to_be64p(const __u64 *p)
95686 {
95687- return (__force __be64)__swab64p(p);
95688+ return (__force const __be64)__swab64p(p);
95689 }
95690 static inline __u64 __be64_to_cpup(const __be64 *p)
95691 {
95692- return __swab64p((__u64 *)p);
95693+ return __swab64p((const __u64 *)p);
95694 }
95695 static inline __be32 __cpu_to_be32p(const __u32 *p)
95696 {
95697- return (__force __be32)__swab32p(p);
95698+ return (__force const __be32)__swab32p(p);
95699 }
95700-static inline __u32 __be32_to_cpup(const __be32 *p)
95701+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
95702 {
95703- return __swab32p((__u32 *)p);
95704+ return __swab32p((const __u32 *)p);
95705 }
95706 static inline __be16 __cpu_to_be16p(const __u16 *p)
95707 {
95708- return (__force __be16)__swab16p(p);
95709+ return (__force const __be16)__swab16p(p);
95710 }
95711 static inline __u16 __be16_to_cpup(const __be16 *p)
95712 {
95713- return __swab16p((__u16 *)p);
95714+ return __swab16p((const __u16 *)p);
95715 }
95716 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
95717 #define __le64_to_cpus(x) do { (void)(x); } while (0)
95718diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
95719index 71e1d0e..6cc9caf 100644
95720--- a/include/uapi/linux/elf.h
95721+++ b/include/uapi/linux/elf.h
95722@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
95723 #define PT_GNU_EH_FRAME 0x6474e550
95724
95725 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
95726+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
95727+
95728+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
95729+
95730+/* Constants for the e_flags field */
95731+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
95732+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
95733+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
95734+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
95735+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
95736+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
95737
95738 /*
95739 * Extended Numbering
95740@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
95741 #define DT_DEBUG 21
95742 #define DT_TEXTREL 22
95743 #define DT_JMPREL 23
95744+#define DT_FLAGS 30
95745+ #define DF_TEXTREL 0x00000004
95746 #define DT_ENCODING 32
95747 #define OLD_DT_LOOS 0x60000000
95748 #define DT_LOOS 0x6000000d
95749@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
95750 #define PF_W 0x2
95751 #define PF_X 0x1
95752
95753+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
95754+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
95755+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
95756+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
95757+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
95758+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
95759+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
95760+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
95761+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
95762+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
95763+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
95764+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
95765+
95766 typedef struct elf32_phdr{
95767 Elf32_Word p_type;
95768 Elf32_Off p_offset;
95769@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
95770 #define EI_OSABI 7
95771 #define EI_PAD 8
95772
95773+#define EI_PAX 14
95774+
95775 #define ELFMAG0 0x7f /* EI_MAG */
95776 #define ELFMAG1 'E'
95777 #define ELFMAG2 'L'
95778diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
95779index aa169c4..6a2771d 100644
95780--- a/include/uapi/linux/personality.h
95781+++ b/include/uapi/linux/personality.h
95782@@ -30,6 +30,7 @@ enum {
95783 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
95784 ADDR_NO_RANDOMIZE | \
95785 ADDR_COMPAT_LAYOUT | \
95786+ ADDR_LIMIT_3GB | \
95787 MMAP_PAGE_ZERO)
95788
95789 /*
95790diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
95791index 7530e74..e714828 100644
95792--- a/include/uapi/linux/screen_info.h
95793+++ b/include/uapi/linux/screen_info.h
95794@@ -43,7 +43,8 @@ struct screen_info {
95795 __u16 pages; /* 0x32 */
95796 __u16 vesa_attributes; /* 0x34 */
95797 __u32 capabilities; /* 0x36 */
95798- __u8 _reserved[6]; /* 0x3a */
95799+ __u16 vesapm_size; /* 0x3a */
95800+ __u8 _reserved[4]; /* 0x3c */
95801 } __attribute__((packed));
95802
95803 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
95804diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
95805index 0e011eb..82681b1 100644
95806--- a/include/uapi/linux/swab.h
95807+++ b/include/uapi/linux/swab.h
95808@@ -43,7 +43,7 @@
95809 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
95810 */
95811
95812-static inline __attribute_const__ __u16 __fswab16(__u16 val)
95813+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
95814 {
95815 #ifdef __HAVE_BUILTIN_BSWAP16__
95816 return __builtin_bswap16(val);
95817@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
95818 #endif
95819 }
95820
95821-static inline __attribute_const__ __u32 __fswab32(__u32 val)
95822+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
95823 {
95824 #ifdef __HAVE_BUILTIN_BSWAP32__
95825 return __builtin_bswap32(val);
95826@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
95827 #endif
95828 }
95829
95830-static inline __attribute_const__ __u64 __fswab64(__u64 val)
95831+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
95832 {
95833 #ifdef __HAVE_BUILTIN_BSWAP64__
95834 return __builtin_bswap64(val);
95835diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
95836index 1590c49..5eab462 100644
95837--- a/include/uapi/linux/xattr.h
95838+++ b/include/uapi/linux/xattr.h
95839@@ -73,5 +73,9 @@
95840 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
95841 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
95842
95843+/* User namespace */
95844+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
95845+#define XATTR_PAX_FLAGS_SUFFIX "flags"
95846+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
95847
95848 #endif /* _UAPI_LINUX_XATTR_H */
95849diff --git a/include/video/udlfb.h b/include/video/udlfb.h
95850index f9466fa..f4e2b81 100644
95851--- a/include/video/udlfb.h
95852+++ b/include/video/udlfb.h
95853@@ -53,10 +53,10 @@ struct dlfb_data {
95854 u32 pseudo_palette[256];
95855 int blank_mode; /*one of FB_BLANK_ */
95856 /* blit-only rendering path metrics, exposed through sysfs */
95857- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
95858- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
95859- atomic_t bytes_sent; /* to usb, after compression including overhead */
95860- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
95861+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
95862+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
95863+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
95864+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
95865 };
95866
95867 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
95868diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
95869index 30f5362..8ed8ac9 100644
95870--- a/include/video/uvesafb.h
95871+++ b/include/video/uvesafb.h
95872@@ -122,6 +122,7 @@ struct uvesafb_par {
95873 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
95874 u8 pmi_setpal; /* PMI for palette changes */
95875 u16 *pmi_base; /* protected mode interface location */
95876+ u8 *pmi_code; /* protected mode code location */
95877 void *pmi_start;
95878 void *pmi_pal;
95879 u8 *vbe_state_orig; /*
95880diff --git a/init/Kconfig b/init/Kconfig
95881index f5dbc6d..8259396 100644
95882--- a/init/Kconfig
95883+++ b/init/Kconfig
95884@@ -1136,6 +1136,7 @@ endif # CGROUPS
95885
95886 config CHECKPOINT_RESTORE
95887 bool "Checkpoint/restore support" if EXPERT
95888+ depends on !GRKERNSEC
95889 default n
95890 help
95891 Enables additional kernel features in a sake of checkpoint/restore.
95892@@ -1646,7 +1647,7 @@ config SLUB_DEBUG
95893
95894 config COMPAT_BRK
95895 bool "Disable heap randomization"
95896- default y
95897+ default n
95898 help
95899 Randomizing heap placement makes heap exploits harder, but it
95900 also breaks ancient binaries (including anything libc5 based).
95901@@ -1977,7 +1978,7 @@ config INIT_ALL_POSSIBLE
95902 config STOP_MACHINE
95903 bool
95904 default y
95905- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
95906+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
95907 help
95908 Need stop_machine() primitive.
95909
95910diff --git a/init/Makefile b/init/Makefile
95911index 7bc47ee..6da2dc7 100644
95912--- a/init/Makefile
95913+++ b/init/Makefile
95914@@ -2,6 +2,9 @@
95915 # Makefile for the linux kernel.
95916 #
95917
95918+ccflags-y := $(GCC_PLUGINS_CFLAGS)
95919+asflags-y := $(GCC_PLUGINS_AFLAGS)
95920+
95921 obj-y := main.o version.o mounts.o
95922 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
95923 obj-y += noinitramfs.o
95924diff --git a/init/do_mounts.c b/init/do_mounts.c
95925index eb41008..f5dbbf9 100644
95926--- a/init/do_mounts.c
95927+++ b/init/do_mounts.c
95928@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
95929 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
95930 {
95931 struct super_block *s;
95932- int err = sys_mount(name, "/root", fs, flags, data);
95933+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
95934 if (err)
95935 return err;
95936
95937- sys_chdir("/root");
95938+ sys_chdir((const char __force_user *)"/root");
95939 s = current->fs->pwd.dentry->d_sb;
95940 ROOT_DEV = s->s_dev;
95941 printk(KERN_INFO
95942@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
95943 va_start(args, fmt);
95944 vsprintf(buf, fmt, args);
95945 va_end(args);
95946- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
95947+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
95948 if (fd >= 0) {
95949 sys_ioctl(fd, FDEJECT, 0);
95950 sys_close(fd);
95951 }
95952 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
95953- fd = sys_open("/dev/console", O_RDWR, 0);
95954+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
95955 if (fd >= 0) {
95956 sys_ioctl(fd, TCGETS, (long)&termios);
95957 termios.c_lflag &= ~ICANON;
95958 sys_ioctl(fd, TCSETSF, (long)&termios);
95959- sys_read(fd, &c, 1);
95960+ sys_read(fd, (char __user *)&c, 1);
95961 termios.c_lflag |= ICANON;
95962 sys_ioctl(fd, TCSETSF, (long)&termios);
95963 sys_close(fd);
95964@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
95965 mount_root();
95966 out:
95967 devtmpfs_mount("dev");
95968- sys_mount(".", "/", NULL, MS_MOVE, NULL);
95969- sys_chroot(".");
95970+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
95971+ sys_chroot((const char __force_user *)".");
95972 }
95973
95974 static bool is_tmpfs;
95975diff --git a/init/do_mounts.h b/init/do_mounts.h
95976index f5b978a..69dbfe8 100644
95977--- a/init/do_mounts.h
95978+++ b/init/do_mounts.h
95979@@ -15,15 +15,15 @@ extern int root_mountflags;
95980
95981 static inline int create_dev(char *name, dev_t dev)
95982 {
95983- sys_unlink(name);
95984- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
95985+ sys_unlink((char __force_user *)name);
95986+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
95987 }
95988
95989 #if BITS_PER_LONG == 32
95990 static inline u32 bstat(char *name)
95991 {
95992 struct stat64 stat;
95993- if (sys_stat64(name, &stat) != 0)
95994+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
95995 return 0;
95996 if (!S_ISBLK(stat.st_mode))
95997 return 0;
95998@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
95999 static inline u32 bstat(char *name)
96000 {
96001 struct stat stat;
96002- if (sys_newstat(name, &stat) != 0)
96003+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
96004 return 0;
96005 if (!S_ISBLK(stat.st_mode))
96006 return 0;
96007diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
96008index 3e0878e..8a9d7a0 100644
96009--- a/init/do_mounts_initrd.c
96010+++ b/init/do_mounts_initrd.c
96011@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
96012 {
96013 sys_unshare(CLONE_FS | CLONE_FILES);
96014 /* stdin/stdout/stderr for /linuxrc */
96015- sys_open("/dev/console", O_RDWR, 0);
96016+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
96017 sys_dup(0);
96018 sys_dup(0);
96019 /* move initrd over / and chdir/chroot in initrd root */
96020- sys_chdir("/root");
96021- sys_mount(".", "/", NULL, MS_MOVE, NULL);
96022- sys_chroot(".");
96023+ sys_chdir((const char __force_user *)"/root");
96024+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
96025+ sys_chroot((const char __force_user *)".");
96026 sys_setsid();
96027 return 0;
96028 }
96029@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
96030 create_dev("/dev/root.old", Root_RAM0);
96031 /* mount initrd on rootfs' /root */
96032 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
96033- sys_mkdir("/old", 0700);
96034- sys_chdir("/old");
96035+ sys_mkdir((const char __force_user *)"/old", 0700);
96036+ sys_chdir((const char __force_user *)"/old");
96037
96038 /* try loading default modules from initrd */
96039 load_default_modules();
96040@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
96041 current->flags &= ~PF_FREEZER_SKIP;
96042
96043 /* move initrd to rootfs' /old */
96044- sys_mount("..", ".", NULL, MS_MOVE, NULL);
96045+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
96046 /* switch root and cwd back to / of rootfs */
96047- sys_chroot("..");
96048+ sys_chroot((const char __force_user *)"..");
96049
96050 if (new_decode_dev(real_root_dev) == Root_RAM0) {
96051- sys_chdir("/old");
96052+ sys_chdir((const char __force_user *)"/old");
96053 return;
96054 }
96055
96056- sys_chdir("/");
96057+ sys_chdir((const char __force_user *)"/");
96058 ROOT_DEV = new_decode_dev(real_root_dev);
96059 mount_root();
96060
96061 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
96062- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
96063+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
96064 if (!error)
96065 printk("okay\n");
96066 else {
96067- int fd = sys_open("/dev/root.old", O_RDWR, 0);
96068+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
96069 if (error == -ENOENT)
96070 printk("/initrd does not exist. Ignored.\n");
96071 else
96072 printk("failed\n");
96073 printk(KERN_NOTICE "Unmounting old root\n");
96074- sys_umount("/old", MNT_DETACH);
96075+ sys_umount((char __force_user *)"/old", MNT_DETACH);
96076 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
96077 if (fd < 0) {
96078 error = fd;
96079@@ -127,11 +127,11 @@ int __init initrd_load(void)
96080 * mounted in the normal path.
96081 */
96082 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
96083- sys_unlink("/initrd.image");
96084+ sys_unlink((const char __force_user *)"/initrd.image");
96085 handle_initrd();
96086 return 1;
96087 }
96088 }
96089- sys_unlink("/initrd.image");
96090+ sys_unlink((const char __force_user *)"/initrd.image");
96091 return 0;
96092 }
96093diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
96094index 8cb6db5..d729f50 100644
96095--- a/init/do_mounts_md.c
96096+++ b/init/do_mounts_md.c
96097@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
96098 partitioned ? "_d" : "", minor,
96099 md_setup_args[ent].device_names);
96100
96101- fd = sys_open(name, 0, 0);
96102+ fd = sys_open((char __force_user *)name, 0, 0);
96103 if (fd < 0) {
96104 printk(KERN_ERR "md: open failed - cannot start "
96105 "array %s\n", name);
96106@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
96107 * array without it
96108 */
96109 sys_close(fd);
96110- fd = sys_open(name, 0, 0);
96111+ fd = sys_open((char __force_user *)name, 0, 0);
96112 sys_ioctl(fd, BLKRRPART, 0);
96113 }
96114 sys_close(fd);
96115@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
96116
96117 wait_for_device_probe();
96118
96119- fd = sys_open("/dev/md0", 0, 0);
96120+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
96121 if (fd >= 0) {
96122 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
96123 sys_close(fd);
96124diff --git a/init/init_task.c b/init/init_task.c
96125index ba0a7f36..2bcf1d5 100644
96126--- a/init/init_task.c
96127+++ b/init/init_task.c
96128@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
96129 * Initial thread structure. Alignment of this is handled by a special
96130 * linker map entry.
96131 */
96132+#ifdef CONFIG_X86
96133+union thread_union init_thread_union __init_task_data;
96134+#else
96135 union thread_union init_thread_union __init_task_data =
96136 { INIT_THREAD_INFO(init_task) };
96137+#endif
96138diff --git a/init/initramfs.c b/init/initramfs.c
96139index ad1bd77..dca2c1b 100644
96140--- a/init/initramfs.c
96141+++ b/init/initramfs.c
96142@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
96143
96144 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
96145 while (count) {
96146- ssize_t rv = sys_write(fd, p, count);
96147+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
96148
96149 if (rv < 0) {
96150 if (rv == -EINTR || rv == -EAGAIN)
96151@@ -107,7 +107,7 @@ static void __init free_hash(void)
96152 }
96153 }
96154
96155-static long __init do_utime(char *filename, time_t mtime)
96156+static long __init do_utime(char __force_user *filename, time_t mtime)
96157 {
96158 struct timespec t[2];
96159
96160@@ -142,7 +142,7 @@ static void __init dir_utime(void)
96161 struct dir_entry *de, *tmp;
96162 list_for_each_entry_safe(de, tmp, &dir_list, list) {
96163 list_del(&de->list);
96164- do_utime(de->name, de->mtime);
96165+ do_utime((char __force_user *)de->name, de->mtime);
96166 kfree(de->name);
96167 kfree(de);
96168 }
96169@@ -304,7 +304,7 @@ static int __init maybe_link(void)
96170 if (nlink >= 2) {
96171 char *old = find_link(major, minor, ino, mode, collected);
96172 if (old)
96173- return (sys_link(old, collected) < 0) ? -1 : 1;
96174+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
96175 }
96176 return 0;
96177 }
96178@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
96179 {
96180 struct stat st;
96181
96182- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
96183+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
96184 if (S_ISDIR(st.st_mode))
96185- sys_rmdir(path);
96186+ sys_rmdir((char __force_user *)path);
96187 else
96188- sys_unlink(path);
96189+ sys_unlink((char __force_user *)path);
96190 }
96191 }
96192
96193@@ -338,7 +338,7 @@ static int __init do_name(void)
96194 int openflags = O_WRONLY|O_CREAT;
96195 if (ml != 1)
96196 openflags |= O_TRUNC;
96197- wfd = sys_open(collected, openflags, mode);
96198+ wfd = sys_open((char __force_user *)collected, openflags, mode);
96199
96200 if (wfd >= 0) {
96201 sys_fchown(wfd, uid, gid);
96202@@ -350,17 +350,17 @@ static int __init do_name(void)
96203 }
96204 }
96205 } else if (S_ISDIR(mode)) {
96206- sys_mkdir(collected, mode);
96207- sys_chown(collected, uid, gid);
96208- sys_chmod(collected, mode);
96209+ sys_mkdir((char __force_user *)collected, mode);
96210+ sys_chown((char __force_user *)collected, uid, gid);
96211+ sys_chmod((char __force_user *)collected, mode);
96212 dir_add(collected, mtime);
96213 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
96214 S_ISFIFO(mode) || S_ISSOCK(mode)) {
96215 if (maybe_link() == 0) {
96216- sys_mknod(collected, mode, rdev);
96217- sys_chown(collected, uid, gid);
96218- sys_chmod(collected, mode);
96219- do_utime(collected, mtime);
96220+ sys_mknod((char __force_user *)collected, mode, rdev);
96221+ sys_chown((char __force_user *)collected, uid, gid);
96222+ sys_chmod((char __force_user *)collected, mode);
96223+ do_utime((char __force_user *)collected, mtime);
96224 }
96225 }
96226 return 0;
96227@@ -372,7 +372,7 @@ static int __init do_copy(void)
96228 if (xwrite(wfd, victim, body_len) != body_len)
96229 error("write error");
96230 sys_close(wfd);
96231- do_utime(vcollected, mtime);
96232+ do_utime((char __force_user *)vcollected, mtime);
96233 kfree(vcollected);
96234 eat(body_len);
96235 state = SkipIt;
96236@@ -390,9 +390,9 @@ static int __init do_symlink(void)
96237 {
96238 collected[N_ALIGN(name_len) + body_len] = '\0';
96239 clean_path(collected, 0);
96240- sys_symlink(collected + N_ALIGN(name_len), collected);
96241- sys_lchown(collected, uid, gid);
96242- do_utime(collected, mtime);
96243+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
96244+ sys_lchown((char __force_user *)collected, uid, gid);
96245+ do_utime((char __force_user *)collected, mtime);
96246 state = SkipIt;
96247 next_state = Reset;
96248 return 0;
96249diff --git a/init/main.c b/init/main.c
96250index 6f0f1c5f..a542824 100644
96251--- a/init/main.c
96252+++ b/init/main.c
96253@@ -96,6 +96,8 @@ extern void radix_tree_init(void);
96254 static inline void mark_rodata_ro(void) { }
96255 #endif
96256
96257+extern void grsecurity_init(void);
96258+
96259 /*
96260 * Debug helper: via this flag we know that we are in 'early bootup code'
96261 * where only the boot processor is running with IRQ disabled. This means
96262@@ -157,6 +159,85 @@ static int __init set_reset_devices(char *str)
96263
96264 __setup("reset_devices", set_reset_devices);
96265
96266+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
96267+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
96268+static int __init setup_grsec_proc_gid(char *str)
96269+{
96270+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
96271+ return 1;
96272+}
96273+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
96274+#endif
96275+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
96276+int grsec_enable_sysfs_restrict = 1;
96277+static int __init setup_grsec_sysfs_restrict(char *str)
96278+{
96279+ if (!simple_strtol(str, NULL, 0))
96280+ grsec_enable_sysfs_restrict = 0;
96281+ return 1;
96282+}
96283+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
96284+#endif
96285+
96286+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
96287+unsigned long pax_user_shadow_base __read_only;
96288+EXPORT_SYMBOL(pax_user_shadow_base);
96289+extern char pax_enter_kernel_user[];
96290+extern char pax_exit_kernel_user[];
96291+#endif
96292+
96293+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
96294+static int __init setup_pax_nouderef(char *str)
96295+{
96296+#ifdef CONFIG_X86_32
96297+ unsigned int cpu;
96298+ struct desc_struct *gdt;
96299+
96300+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
96301+ gdt = get_cpu_gdt_table(cpu);
96302+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
96303+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
96304+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
96305+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
96306+ }
96307+ loadsegment(ds, __KERNEL_DS);
96308+ loadsegment(es, __KERNEL_DS);
96309+ loadsegment(ss, __KERNEL_DS);
96310+#else
96311+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
96312+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
96313+ clone_pgd_mask = ~(pgdval_t)0UL;
96314+ pax_user_shadow_base = 0UL;
96315+ setup_clear_cpu_cap(X86_FEATURE_PCID);
96316+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
96317+#endif
96318+
96319+ return 0;
96320+}
96321+early_param("pax_nouderef", setup_pax_nouderef);
96322+
96323+#ifdef CONFIG_X86_64
96324+static int __init setup_pax_weakuderef(char *str)
96325+{
96326+ if (clone_pgd_mask != ~(pgdval_t)0UL)
96327+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
96328+ return 1;
96329+}
96330+__setup("pax_weakuderef", setup_pax_weakuderef);
96331+#endif
96332+#endif
96333+
96334+#ifdef CONFIG_PAX_SOFTMODE
96335+int pax_softmode;
96336+
96337+static int __init setup_pax_softmode(char *str)
96338+{
96339+ get_option(&str, &pax_softmode);
96340+ return 1;
96341+}
96342+__setup("pax_softmode=", setup_pax_softmode);
96343+#endif
96344+
96345 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
96346 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
96347 static const char *panic_later, *panic_param;
96348@@ -722,7 +803,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
96349 struct blacklist_entry *entry;
96350 char *fn_name;
96351
96352- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
96353+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
96354 if (!fn_name)
96355 return false;
96356
96357@@ -774,7 +855,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
96358 {
96359 int count = preempt_count();
96360 int ret;
96361- char msgbuf[64];
96362+ const char *msg1 = "", *msg2 = "";
96363
96364 if (initcall_blacklisted(fn))
96365 return -EPERM;
96366@@ -784,18 +865,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
96367 else
96368 ret = fn();
96369
96370- msgbuf[0] = 0;
96371-
96372 if (preempt_count() != count) {
96373- sprintf(msgbuf, "preemption imbalance ");
96374+ msg1 = " preemption imbalance";
96375 preempt_count_set(count);
96376 }
96377 if (irqs_disabled()) {
96378- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
96379+ msg2 = " disabled interrupts";
96380 local_irq_enable();
96381 }
96382- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
96383+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
96384
96385+ add_latent_entropy();
96386 return ret;
96387 }
96388
96389@@ -901,8 +981,8 @@ static int run_init_process(const char *init_filename)
96390 {
96391 argv_init[0] = init_filename;
96392 return do_execve(getname_kernel(init_filename),
96393- (const char __user *const __user *)argv_init,
96394- (const char __user *const __user *)envp_init);
96395+ (const char __user *const __force_user *)argv_init,
96396+ (const char __user *const __force_user *)envp_init);
96397 }
96398
96399 static int try_to_run_init_process(const char *init_filename)
96400@@ -919,6 +999,10 @@ static int try_to_run_init_process(const char *init_filename)
96401 return ret;
96402 }
96403
96404+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
96405+extern int gr_init_ran;
96406+#endif
96407+
96408 static noinline void __init kernel_init_freeable(void);
96409
96410 static int __ref kernel_init(void *unused)
96411@@ -943,6 +1027,11 @@ static int __ref kernel_init(void *unused)
96412 ramdisk_execute_command, ret);
96413 }
96414
96415+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
96416+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
96417+ gr_init_ran = 1;
96418+#endif
96419+
96420 /*
96421 * We try each of these until one succeeds.
96422 *
96423@@ -998,7 +1087,7 @@ static noinline void __init kernel_init_freeable(void)
96424 do_basic_setup();
96425
96426 /* Open the /dev/console on the rootfs, this should never fail */
96427- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
96428+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
96429 pr_err("Warning: unable to open an initial console.\n");
96430
96431 (void) sys_dup(0);
96432@@ -1011,11 +1100,13 @@ static noinline void __init kernel_init_freeable(void)
96433 if (!ramdisk_execute_command)
96434 ramdisk_execute_command = "/init";
96435
96436- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
96437+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
96438 ramdisk_execute_command = NULL;
96439 prepare_namespace();
96440 }
96441
96442+ grsecurity_init();
96443+
96444 /*
96445 * Ok, we have completed the initial bootup, and
96446 * we're essentially up and running. Get rid of the
96447diff --git a/ipc/compat.c b/ipc/compat.c
96448index 9b3c85f..5266b0f 100644
96449--- a/ipc/compat.c
96450+++ b/ipc/compat.c
96451@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
96452 COMPAT_SHMLBA);
96453 if (err < 0)
96454 return err;
96455- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
96456+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
96457 }
96458 case SHMDT:
96459 return sys_shmdt(compat_ptr(ptr));
96460@@ -747,7 +747,7 @@ COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
96461 }
96462
96463 COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
96464- unsigned, nsops,
96465+ compat_long_t, nsops,
96466 const struct compat_timespec __user *, timeout)
96467 {
96468 struct timespec __user *ts64;
96469diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
96470index 8ad93c2..efd80f8 100644
96471--- a/ipc/ipc_sysctl.c
96472+++ b/ipc/ipc_sysctl.c
96473@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
96474 static int proc_ipc_dointvec(struct ctl_table *table, int write,
96475 void __user *buffer, size_t *lenp, loff_t *ppos)
96476 {
96477- struct ctl_table ipc_table;
96478+ ctl_table_no_const ipc_table;
96479
96480 memcpy(&ipc_table, table, sizeof(ipc_table));
96481 ipc_table.data = get_ipc(table);
96482@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
96483 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
96484 void __user *buffer, size_t *lenp, loff_t *ppos)
96485 {
96486- struct ctl_table ipc_table;
96487+ ctl_table_no_const ipc_table;
96488
96489 memcpy(&ipc_table, table, sizeof(ipc_table));
96490 ipc_table.data = get_ipc(table);
96491@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
96492 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
96493 void __user *buffer, size_t *lenp, loff_t *ppos)
96494 {
96495- struct ctl_table ipc_table;
96496+ ctl_table_no_const ipc_table;
96497 memcpy(&ipc_table, table, sizeof(ipc_table));
96498 ipc_table.data = get_ipc(table);
96499
96500@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
96501 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
96502 void __user *buffer, size_t *lenp, loff_t *ppos)
96503 {
96504- struct ctl_table ipc_table;
96505+ ctl_table_no_const ipc_table;
96506 int dummy = 0;
96507
96508 memcpy(&ipc_table, table, sizeof(ipc_table));
96509diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
96510index 68d4e95..1477ded 100644
96511--- a/ipc/mq_sysctl.c
96512+++ b/ipc/mq_sysctl.c
96513@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
96514 static int proc_mq_dointvec(struct ctl_table *table, int write,
96515 void __user *buffer, size_t *lenp, loff_t *ppos)
96516 {
96517- struct ctl_table mq_table;
96518+ ctl_table_no_const mq_table;
96519 memcpy(&mq_table, table, sizeof(mq_table));
96520 mq_table.data = get_mq(table);
96521
96522@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
96523 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
96524 void __user *buffer, size_t *lenp, loff_t *ppos)
96525 {
96526- struct ctl_table mq_table;
96527+ ctl_table_no_const mq_table;
96528 memcpy(&mq_table, table, sizeof(mq_table));
96529 mq_table.data = get_mq(table);
96530
96531diff --git a/ipc/mqueue.c b/ipc/mqueue.c
96532index 7635a1c..7432cb6 100644
96533--- a/ipc/mqueue.c
96534+++ b/ipc/mqueue.c
96535@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
96536 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
96537 info->attr.mq_msgsize);
96538
96539+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
96540 spin_lock(&mq_lock);
96541 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
96542 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
96543diff --git a/ipc/sem.c b/ipc/sem.c
96544index 9284211..bca5b1b 100644
96545--- a/ipc/sem.c
96546+++ b/ipc/sem.c
96547@@ -1780,7 +1780,7 @@ static int get_queue_result(struct sem_queue *q)
96548 }
96549
96550 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
96551- unsigned, nsops, const struct timespec __user *, timeout)
96552+ long, nsops, const struct timespec __user *, timeout)
96553 {
96554 int error = -EINVAL;
96555 struct sem_array *sma;
96556@@ -2015,7 +2015,7 @@ out_free:
96557 }
96558
96559 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
96560- unsigned, nsops)
96561+ long, nsops)
96562 {
96563 return sys_semtimedop(semid, tsops, nsops, NULL);
96564 }
96565diff --git a/ipc/shm.c b/ipc/shm.c
96566index 19633b4..d454904 100644
96567--- a/ipc/shm.c
96568+++ b/ipc/shm.c
96569@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
96570 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
96571 #endif
96572
96573+#ifdef CONFIG_GRKERNSEC
96574+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
96575+ const u64 shm_createtime, const kuid_t cuid,
96576+ const int shmid);
96577+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
96578+ const u64 shm_createtime);
96579+#endif
96580+
96581 void shm_init_ns(struct ipc_namespace *ns)
96582 {
96583 ns->shm_ctlmax = SHMMAX;
96584@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
96585 shp->shm_lprid = 0;
96586 shp->shm_atim = shp->shm_dtim = 0;
96587 shp->shm_ctim = get_seconds();
96588+#ifdef CONFIG_GRKERNSEC
96589+ shp->shm_createtime = ktime_get_ns();
96590+#endif
96591 shp->shm_segsz = size;
96592 shp->shm_nattch = 0;
96593 shp->shm_file = file;
96594@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
96595 f_mode = FMODE_READ | FMODE_WRITE;
96596 }
96597 if (shmflg & SHM_EXEC) {
96598+
96599+#ifdef CONFIG_PAX_MPROTECT
96600+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
96601+ goto out;
96602+#endif
96603+
96604 prot |= PROT_EXEC;
96605 acc_mode |= S_IXUGO;
96606 }
96607@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
96608 if (err)
96609 goto out_unlock;
96610
96611+#ifdef CONFIG_GRKERNSEC
96612+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
96613+ shp->shm_perm.cuid, shmid) ||
96614+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
96615+ err = -EACCES;
96616+ goto out_unlock;
96617+ }
96618+#endif
96619+
96620 ipc_lock_object(&shp->shm_perm);
96621
96622 /* check if shm_destroy() is tearing down shp */
96623@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
96624 path = shp->shm_file->f_path;
96625 path_get(&path);
96626 shp->shm_nattch++;
96627+#ifdef CONFIG_GRKERNSEC
96628+ shp->shm_lapid = current->pid;
96629+#endif
96630 size = i_size_read(path.dentry->d_inode);
96631 ipc_unlock_object(&shp->shm_perm);
96632 rcu_read_unlock();
96633diff --git a/ipc/util.c b/ipc/util.c
96634index 106bed0..f851429 100644
96635--- a/ipc/util.c
96636+++ b/ipc/util.c
96637@@ -71,6 +71,8 @@ struct ipc_proc_iface {
96638 int (*show)(struct seq_file *, void *);
96639 };
96640
96641+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
96642+
96643 /**
96644 * ipc_init - initialise ipc subsystem
96645 *
96646@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
96647 granted_mode >>= 6;
96648 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
96649 granted_mode >>= 3;
96650+
96651+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
96652+ return -1;
96653+
96654 /* is there some bit set in requested_mode but not in granted_mode? */
96655 if ((requested_mode & ~granted_mode & 0007) &&
96656 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
96657diff --git a/kernel/audit.c b/kernel/audit.c
96658index 72ab759..757deba 100644
96659--- a/kernel/audit.c
96660+++ b/kernel/audit.c
96661@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
96662 3) suppressed due to audit_rate_limit
96663 4) suppressed due to audit_backlog_limit
96664 */
96665-static atomic_t audit_lost = ATOMIC_INIT(0);
96666+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
96667
96668 /* The netlink socket. */
96669 static struct sock *audit_sock;
96670@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
96671 unsigned long now;
96672 int print;
96673
96674- atomic_inc(&audit_lost);
96675+ atomic_inc_unchecked(&audit_lost);
96676
96677 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
96678
96679@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
96680 if (print) {
96681 if (printk_ratelimit())
96682 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
96683- atomic_read(&audit_lost),
96684+ atomic_read_unchecked(&audit_lost),
96685 audit_rate_limit,
96686 audit_backlog_limit);
96687 audit_panic(message);
96688@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
96689 s.pid = audit_pid;
96690 s.rate_limit = audit_rate_limit;
96691 s.backlog_limit = audit_backlog_limit;
96692- s.lost = atomic_read(&audit_lost);
96693+ s.lost = atomic_read_unchecked(&audit_lost);
96694 s.backlog = skb_queue_len(&audit_skb_queue);
96695 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
96696 s.backlog_wait_time = audit_backlog_wait_time;
96697diff --git a/kernel/auditsc.c b/kernel/auditsc.c
96698index dc4ae70..2a2bddc 100644
96699--- a/kernel/auditsc.c
96700+++ b/kernel/auditsc.c
96701@@ -1955,7 +1955,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
96702 }
96703
96704 /* global counter which is incremented every time something logs in */
96705-static atomic_t session_id = ATOMIC_INIT(0);
96706+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
96707
96708 static int audit_set_loginuid_perm(kuid_t loginuid)
96709 {
96710@@ -2022,7 +2022,7 @@ int audit_set_loginuid(kuid_t loginuid)
96711
96712 /* are we setting or clearing? */
96713 if (uid_valid(loginuid))
96714- sessionid = (unsigned int)atomic_inc_return(&session_id);
96715+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
96716
96717 task->sessionid = sessionid;
96718 task->loginuid = loginuid;
96719diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
96720index 0c5796e..a9414e2 100644
96721--- a/kernel/bpf/core.c
96722+++ b/kernel/bpf/core.c
96723@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
96724 * random section of illegal instructions.
96725 */
96726 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
96727- hdr = module_alloc(size);
96728+ hdr = module_alloc_exec(size);
96729 if (hdr == NULL)
96730 return NULL;
96731
96732 /* Fill space with illegal/arch-dep instructions. */
96733 bpf_fill_ill_insns(hdr, size);
96734
96735+ pax_open_kernel();
96736 hdr->pages = size / PAGE_SIZE;
96737+ pax_close_kernel();
96738+
96739 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
96740 PAGE_SIZE - sizeof(*hdr));
96741 start = (prandom_u32() % hole) & ~(alignment - 1);
96742@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
96743
96744 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
96745 {
96746- module_memfree(hdr);
96747+ module_memfree_exec(hdr);
96748 }
96749 #endif /* CONFIG_BPF_JIT */
96750
96751diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
96752index 536edc2..d28c85d 100644
96753--- a/kernel/bpf/syscall.c
96754+++ b/kernel/bpf/syscall.c
96755@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
96756 int err;
96757
96758 /* the syscall is limited to root temporarily. This restriction will be
96759- * lifted when security audit is clean. Note that eBPF+tracing must have
96760- * this restriction, since it may pass kernel data to user space
96761+ * lifted by upstream when a half-assed security audit is clean. Note
96762+ * that eBPF+tracing must have this restriction, since it may pass
96763+ * kernel data to user space
96764 */
96765 if (!capable(CAP_SYS_ADMIN))
96766 return -EPERM;
96767+#ifdef CONFIG_GRKERNSEC
96768+ return -EPERM;
96769+#endif
96770
96771 if (!access_ok(VERIFY_READ, uattr, 1))
96772 return -EFAULT;
96773diff --git a/kernel/capability.c b/kernel/capability.c
96774index 989f5bf..d317ca0 100644
96775--- a/kernel/capability.c
96776+++ b/kernel/capability.c
96777@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
96778 * before modification is attempted and the application
96779 * fails.
96780 */
96781+ if (tocopy > ARRAY_SIZE(kdata))
96782+ return -EFAULT;
96783+
96784 if (copy_to_user(dataptr, kdata, tocopy
96785 * sizeof(struct __user_cap_data_struct))) {
96786 return -EFAULT;
96787@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
96788 int ret;
96789
96790 rcu_read_lock();
96791- ret = security_capable(__task_cred(t), ns, cap);
96792+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
96793+ gr_task_is_capable(t, __task_cred(t), cap);
96794 rcu_read_unlock();
96795
96796- return (ret == 0);
96797+ return ret;
96798 }
96799
96800 /**
96801@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
96802 int ret;
96803
96804 rcu_read_lock();
96805- ret = security_capable_noaudit(__task_cred(t), ns, cap);
96806+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
96807 rcu_read_unlock();
96808
96809- return (ret == 0);
96810+ return ret;
96811 }
96812
96813 /**
96814@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
96815 BUG();
96816 }
96817
96818- if (security_capable(current_cred(), ns, cap) == 0) {
96819+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
96820 current->flags |= PF_SUPERPRIV;
96821 return true;
96822 }
96823@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
96824 }
96825 EXPORT_SYMBOL(ns_capable);
96826
96827+bool ns_capable_nolog(struct user_namespace *ns, int cap)
96828+{
96829+ if (unlikely(!cap_valid(cap))) {
96830+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
96831+ BUG();
96832+ }
96833+
96834+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
96835+ current->flags |= PF_SUPERPRIV;
96836+ return true;
96837+ }
96838+ return false;
96839+}
96840+EXPORT_SYMBOL(ns_capable_nolog);
96841+
96842 /**
96843 * file_ns_capable - Determine if the file's opener had a capability in effect
96844 * @file: The file we want to check
96845@@ -427,6 +446,12 @@ bool capable(int cap)
96846 }
96847 EXPORT_SYMBOL(capable);
96848
96849+bool capable_nolog(int cap)
96850+{
96851+ return ns_capable_nolog(&init_user_ns, cap);
96852+}
96853+EXPORT_SYMBOL(capable_nolog);
96854+
96855 /**
96856 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
96857 * @inode: The inode in question
96858@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
96859 kgid_has_mapping(ns, inode->i_gid);
96860 }
96861 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
96862+
96863+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
96864+{
96865+ struct user_namespace *ns = current_user_ns();
96866+
96867+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
96868+ kgid_has_mapping(ns, inode->i_gid);
96869+}
96870+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
96871diff --git a/kernel/cgroup.c b/kernel/cgroup.c
96872index 29a7b2c..a64e30a 100644
96873--- a/kernel/cgroup.c
96874+++ b/kernel/cgroup.c
96875@@ -5347,6 +5347,9 @@ static void cgroup_release_agent(struct work_struct *work)
96876 if (!pathbuf || !agentbuf)
96877 goto out;
96878
96879+ if (agentbuf[0] == '\0')
96880+ goto out;
96881+
96882 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
96883 if (!path)
96884 goto out;
96885@@ -5532,7 +5535,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
96886 struct task_struct *task;
96887 int count = 0;
96888
96889- seq_printf(seq, "css_set %p\n", cset);
96890+ seq_printf(seq, "css_set %pK\n", cset);
96891
96892 list_for_each_entry(task, &cset->tasks, cg_list) {
96893 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
96894diff --git a/kernel/compat.c b/kernel/compat.c
96895index 24f0061..ea80802 100644
96896--- a/kernel/compat.c
96897+++ b/kernel/compat.c
96898@@ -13,6 +13,7 @@
96899
96900 #include <linux/linkage.h>
96901 #include <linux/compat.h>
96902+#include <linux/module.h>
96903 #include <linux/errno.h>
96904 #include <linux/time.h>
96905 #include <linux/signal.h>
96906@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
96907 mm_segment_t oldfs;
96908 long ret;
96909
96910- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
96911+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
96912 oldfs = get_fs();
96913 set_fs(KERNEL_DS);
96914 ret = hrtimer_nanosleep_restart(restart);
96915@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
96916 oldfs = get_fs();
96917 set_fs(KERNEL_DS);
96918 ret = hrtimer_nanosleep(&tu,
96919- rmtp ? (struct timespec __user *)&rmt : NULL,
96920+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
96921 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
96922 set_fs(oldfs);
96923
96924@@ -378,7 +379,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
96925 mm_segment_t old_fs = get_fs();
96926
96927 set_fs(KERNEL_DS);
96928- ret = sys_sigpending((old_sigset_t __user *) &s);
96929+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
96930 set_fs(old_fs);
96931 if (ret == 0)
96932 ret = put_user(s, set);
96933@@ -468,7 +469,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
96934 mm_segment_t old_fs = get_fs();
96935
96936 set_fs(KERNEL_DS);
96937- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
96938+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
96939 set_fs(old_fs);
96940
96941 if (!ret) {
96942@@ -550,8 +551,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
96943 set_fs (KERNEL_DS);
96944 ret = sys_wait4(pid,
96945 (stat_addr ?
96946- (unsigned int __user *) &status : NULL),
96947- options, (struct rusage __user *) &r);
96948+ (unsigned int __force_user *) &status : NULL),
96949+ options, (struct rusage __force_user *) &r);
96950 set_fs (old_fs);
96951
96952 if (ret > 0) {
96953@@ -577,8 +578,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
96954 memset(&info, 0, sizeof(info));
96955
96956 set_fs(KERNEL_DS);
96957- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
96958- uru ? (struct rusage __user *)&ru : NULL);
96959+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
96960+ uru ? (struct rusage __force_user *)&ru : NULL);
96961 set_fs(old_fs);
96962
96963 if ((ret < 0) || (info.si_signo == 0))
96964@@ -712,8 +713,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
96965 oldfs = get_fs();
96966 set_fs(KERNEL_DS);
96967 err = sys_timer_settime(timer_id, flags,
96968- (struct itimerspec __user *) &newts,
96969- (struct itimerspec __user *) &oldts);
96970+ (struct itimerspec __force_user *) &newts,
96971+ (struct itimerspec __force_user *) &oldts);
96972 set_fs(oldfs);
96973 if (!err && old && put_compat_itimerspec(old, &oldts))
96974 return -EFAULT;
96975@@ -730,7 +731,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
96976 oldfs = get_fs();
96977 set_fs(KERNEL_DS);
96978 err = sys_timer_gettime(timer_id,
96979- (struct itimerspec __user *) &ts);
96980+ (struct itimerspec __force_user *) &ts);
96981 set_fs(oldfs);
96982 if (!err && put_compat_itimerspec(setting, &ts))
96983 return -EFAULT;
96984@@ -749,7 +750,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
96985 oldfs = get_fs();
96986 set_fs(KERNEL_DS);
96987 err = sys_clock_settime(which_clock,
96988- (struct timespec __user *) &ts);
96989+ (struct timespec __force_user *) &ts);
96990 set_fs(oldfs);
96991 return err;
96992 }
96993@@ -764,7 +765,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
96994 oldfs = get_fs();
96995 set_fs(KERNEL_DS);
96996 err = sys_clock_gettime(which_clock,
96997- (struct timespec __user *) &ts);
96998+ (struct timespec __force_user *) &ts);
96999 set_fs(oldfs);
97000 if (!err && compat_put_timespec(&ts, tp))
97001 return -EFAULT;
97002@@ -784,7 +785,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
97003
97004 oldfs = get_fs();
97005 set_fs(KERNEL_DS);
97006- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
97007+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
97008 set_fs(oldfs);
97009
97010 err = compat_put_timex(utp, &txc);
97011@@ -804,7 +805,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
97012 oldfs = get_fs();
97013 set_fs(KERNEL_DS);
97014 err = sys_clock_getres(which_clock,
97015- (struct timespec __user *) &ts);
97016+ (struct timespec __force_user *) &ts);
97017 set_fs(oldfs);
97018 if (!err && tp && compat_put_timespec(&ts, tp))
97019 return -EFAULT;
97020@@ -818,7 +819,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
97021 struct timespec tu;
97022 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
97023
97024- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
97025+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
97026 oldfs = get_fs();
97027 set_fs(KERNEL_DS);
97028 err = clock_nanosleep_restart(restart);
97029@@ -850,8 +851,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
97030 oldfs = get_fs();
97031 set_fs(KERNEL_DS);
97032 err = sys_clock_nanosleep(which_clock, flags,
97033- (struct timespec __user *) &in,
97034- (struct timespec __user *) &out);
97035+ (struct timespec __force_user *) &in,
97036+ (struct timespec __force_user *) &out);
97037 set_fs(oldfs);
97038
97039 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
97040@@ -1145,7 +1146,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
97041 mm_segment_t old_fs = get_fs();
97042
97043 set_fs(KERNEL_DS);
97044- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
97045+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
97046 set_fs(old_fs);
97047 if (compat_put_timespec(&t, interval))
97048 return -EFAULT;
97049diff --git a/kernel/configs.c b/kernel/configs.c
97050index c18b1f1..b9a0132 100644
97051--- a/kernel/configs.c
97052+++ b/kernel/configs.c
97053@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
97054 struct proc_dir_entry *entry;
97055
97056 /* create the current config file */
97057+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
97058+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
97059+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
97060+ &ikconfig_file_ops);
97061+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
97062+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
97063+ &ikconfig_file_ops);
97064+#endif
97065+#else
97066 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
97067 &ikconfig_file_ops);
97068+#endif
97069+
97070 if (!entry)
97071 return -ENOMEM;
97072
97073diff --git a/kernel/cred.c b/kernel/cred.c
97074index e0573a4..26c0fd3 100644
97075--- a/kernel/cred.c
97076+++ b/kernel/cred.c
97077@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
97078 validate_creds(cred);
97079 alter_cred_subscribers(cred, -1);
97080 put_cred(cred);
97081+
97082+#ifdef CONFIG_GRKERNSEC_SETXID
97083+ cred = (struct cred *) tsk->delayed_cred;
97084+ if (cred != NULL) {
97085+ tsk->delayed_cred = NULL;
97086+ validate_creds(cred);
97087+ alter_cred_subscribers(cred, -1);
97088+ put_cred(cred);
97089+ }
97090+#endif
97091 }
97092
97093 /**
97094@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
97095 * Always returns 0 thus allowing this function to be tail-called at the end
97096 * of, say, sys_setgid().
97097 */
97098-int commit_creds(struct cred *new)
97099+static int __commit_creds(struct cred *new)
97100 {
97101 struct task_struct *task = current;
97102 const struct cred *old = task->real_cred;
97103@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
97104
97105 get_cred(new); /* we will require a ref for the subj creds too */
97106
97107+ gr_set_role_label(task, new->uid, new->gid);
97108+
97109 /* dumpability changes */
97110 if (!uid_eq(old->euid, new->euid) ||
97111 !gid_eq(old->egid, new->egid) ||
97112@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
97113 put_cred(old);
97114 return 0;
97115 }
97116+#ifdef CONFIG_GRKERNSEC_SETXID
97117+extern int set_user(struct cred *new);
97118+
97119+void gr_delayed_cred_worker(void)
97120+{
97121+ const struct cred *new = current->delayed_cred;
97122+ struct cred *ncred;
97123+
97124+ current->delayed_cred = NULL;
97125+
97126+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
97127+ // from doing get_cred on it when queueing this
97128+ put_cred(new);
97129+ return;
97130+ } else if (new == NULL)
97131+ return;
97132+
97133+ ncred = prepare_creds();
97134+ if (!ncred)
97135+ goto die;
97136+ // uids
97137+ ncred->uid = new->uid;
97138+ ncred->euid = new->euid;
97139+ ncred->suid = new->suid;
97140+ ncred->fsuid = new->fsuid;
97141+ // gids
97142+ ncred->gid = new->gid;
97143+ ncred->egid = new->egid;
97144+ ncred->sgid = new->sgid;
97145+ ncred->fsgid = new->fsgid;
97146+ // groups
97147+ set_groups(ncred, new->group_info);
97148+ // caps
97149+ ncred->securebits = new->securebits;
97150+ ncred->cap_inheritable = new->cap_inheritable;
97151+ ncred->cap_permitted = new->cap_permitted;
97152+ ncred->cap_effective = new->cap_effective;
97153+ ncred->cap_bset = new->cap_bset;
97154+
97155+ if (set_user(ncred)) {
97156+ abort_creds(ncred);
97157+ goto die;
97158+ }
97159+
97160+ // from doing get_cred on it when queueing this
97161+ put_cred(new);
97162+
97163+ __commit_creds(ncred);
97164+ return;
97165+die:
97166+ // from doing get_cred on it when queueing this
97167+ put_cred(new);
97168+ do_group_exit(SIGKILL);
97169+}
97170+#endif
97171+
97172+int commit_creds(struct cred *new)
97173+{
97174+#ifdef CONFIG_GRKERNSEC_SETXID
97175+ int ret;
97176+ int schedule_it = 0;
97177+ struct task_struct *t;
97178+ unsigned oldsecurebits = current_cred()->securebits;
97179+
97180+ /* we won't get called with tasklist_lock held for writing
97181+ and interrupts disabled as the cred struct in that case is
97182+ init_cred
97183+ */
97184+ if (grsec_enable_setxid && !current_is_single_threaded() &&
97185+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
97186+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
97187+ schedule_it = 1;
97188+ }
97189+ ret = __commit_creds(new);
97190+ if (schedule_it) {
97191+ rcu_read_lock();
97192+ read_lock(&tasklist_lock);
97193+ for (t = next_thread(current); t != current;
97194+ t = next_thread(t)) {
97195+ /* we'll check if the thread has uid 0 in
97196+ * the delayed worker routine
97197+ */
97198+ if (task_securebits(t) == oldsecurebits &&
97199+ t->delayed_cred == NULL) {
97200+ t->delayed_cred = get_cred(new);
97201+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
97202+ set_tsk_need_resched(t);
97203+ }
97204+ }
97205+ read_unlock(&tasklist_lock);
97206+ rcu_read_unlock();
97207+ }
97208+
97209+ return ret;
97210+#else
97211+ return __commit_creds(new);
97212+#endif
97213+}
97214+
97215 EXPORT_SYMBOL(commit_creds);
97216
97217 /**
97218diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
97219index 0874e2e..5b32cc9 100644
97220--- a/kernel/debug/debug_core.c
97221+++ b/kernel/debug/debug_core.c
97222@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
97223 */
97224 static atomic_t masters_in_kgdb;
97225 static atomic_t slaves_in_kgdb;
97226-static atomic_t kgdb_break_tasklet_var;
97227+static atomic_unchecked_t kgdb_break_tasklet_var;
97228 atomic_t kgdb_setting_breakpoint;
97229
97230 struct task_struct *kgdb_usethread;
97231@@ -137,7 +137,7 @@ int kgdb_single_step;
97232 static pid_t kgdb_sstep_pid;
97233
97234 /* to keep track of the CPU which is doing the single stepping*/
97235-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
97236+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
97237
97238 /*
97239 * If you are debugging a problem where roundup (the collection of
97240@@ -552,7 +552,7 @@ return_normal:
97241 * kernel will only try for the value of sstep_tries before
97242 * giving up and continuing on.
97243 */
97244- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
97245+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
97246 (kgdb_info[cpu].task &&
97247 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
97248 atomic_set(&kgdb_active, -1);
97249@@ -654,8 +654,8 @@ cpu_master_loop:
97250 }
97251
97252 kgdb_restore:
97253- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
97254- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
97255+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
97256+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
97257 if (kgdb_info[sstep_cpu].task)
97258 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
97259 else
97260@@ -949,18 +949,18 @@ static void kgdb_unregister_callbacks(void)
97261 static void kgdb_tasklet_bpt(unsigned long ing)
97262 {
97263 kgdb_breakpoint();
97264- atomic_set(&kgdb_break_tasklet_var, 0);
97265+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
97266 }
97267
97268 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
97269
97270 void kgdb_schedule_breakpoint(void)
97271 {
97272- if (atomic_read(&kgdb_break_tasklet_var) ||
97273+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
97274 atomic_read(&kgdb_active) != -1 ||
97275 atomic_read(&kgdb_setting_breakpoint))
97276 return;
97277- atomic_inc(&kgdb_break_tasklet_var);
97278+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
97279 tasklet_schedule(&kgdb_tasklet_breakpoint);
97280 }
97281 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
97282diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
97283index 41213454..861e178 100644
97284--- a/kernel/debug/kdb/kdb_main.c
97285+++ b/kernel/debug/kdb/kdb_main.c
97286@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
97287 continue;
97288
97289 kdb_printf("%-20s%8u 0x%p ", mod->name,
97290- mod->core_size, (void *)mod);
97291+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
97292 #ifdef CONFIG_MODULE_UNLOAD
97293 kdb_printf("%4d ", module_refcount(mod));
97294 #endif
97295@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
97296 kdb_printf(" (Loading)");
97297 else
97298 kdb_printf(" (Live)");
97299- kdb_printf(" 0x%p", mod->module_core);
97300+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
97301
97302 #ifdef CONFIG_MODULE_UNLOAD
97303 {
97304diff --git a/kernel/events/core.c b/kernel/events/core.c
97305index 2fabc06..79cceec 100644
97306--- a/kernel/events/core.c
97307+++ b/kernel/events/core.c
97308@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
97309 * 0 - disallow raw tracepoint access for unpriv
97310 * 1 - disallow cpu events for unpriv
97311 * 2 - disallow kernel profiling for unpriv
97312+ * 3 - disallow all unpriv perf event use
97313 */
97314-int sysctl_perf_event_paranoid __read_mostly = 1;
97315+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
97316+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
97317+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
97318+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
97319+#else
97320+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
97321+#endif
97322
97323 /* Minimum for 512 kiB + 1 user control page */
97324 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
97325@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
97326
97327 tmp *= sysctl_perf_cpu_time_max_percent;
97328 do_div(tmp, 100);
97329- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
97330+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
97331 }
97332
97333 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
97334@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
97335 }
97336 }
97337
97338-static atomic64_t perf_event_id;
97339+static atomic64_unchecked_t perf_event_id;
97340
97341 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
97342 enum event_type_t event_type);
97343@@ -3220,7 +3227,7 @@ static void __perf_event_read(void *info)
97344
97345 static inline u64 perf_event_count(struct perf_event *event)
97346 {
97347- return local64_read(&event->count) + atomic64_read(&event->child_count);
97348+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
97349 }
97350
97351 static u64 perf_event_read(struct perf_event *event)
97352@@ -3656,9 +3663,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
97353 mutex_lock(&event->child_mutex);
97354 total += perf_event_read(event);
97355 *enabled += event->total_time_enabled +
97356- atomic64_read(&event->child_total_time_enabled);
97357+ atomic64_read_unchecked(&event->child_total_time_enabled);
97358 *running += event->total_time_running +
97359- atomic64_read(&event->child_total_time_running);
97360+ atomic64_read_unchecked(&event->child_total_time_running);
97361
97362 list_for_each_entry(child, &event->child_list, child_list) {
97363 total += perf_event_read(child);
97364@@ -4147,10 +4154,10 @@ void perf_event_update_userpage(struct perf_event *event)
97365 userpg->offset -= local64_read(&event->hw.prev_count);
97366
97367 userpg->time_enabled = enabled +
97368- atomic64_read(&event->child_total_time_enabled);
97369+ atomic64_read_unchecked(&event->child_total_time_enabled);
97370
97371 userpg->time_running = running +
97372- atomic64_read(&event->child_total_time_running);
97373+ atomic64_read_unchecked(&event->child_total_time_running);
97374
97375 arch_perf_update_userpage(event, userpg, now);
97376
97377@@ -4740,7 +4747,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
97378
97379 /* Data. */
97380 sp = perf_user_stack_pointer(regs);
97381- rem = __output_copy_user(handle, (void *) sp, dump_size);
97382+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
97383 dyn_size = dump_size - rem;
97384
97385 perf_output_skip(handle, rem);
97386@@ -4831,11 +4838,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
97387 values[n++] = perf_event_count(event);
97388 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
97389 values[n++] = enabled +
97390- atomic64_read(&event->child_total_time_enabled);
97391+ atomic64_read_unchecked(&event->child_total_time_enabled);
97392 }
97393 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
97394 values[n++] = running +
97395- atomic64_read(&event->child_total_time_running);
97396+ atomic64_read_unchecked(&event->child_total_time_running);
97397 }
97398 if (read_format & PERF_FORMAT_ID)
97399 values[n++] = primary_event_id(event);
97400@@ -7180,7 +7187,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
97401 event->parent = parent_event;
97402
97403 event->ns = get_pid_ns(task_active_pid_ns(current));
97404- event->id = atomic64_inc_return(&perf_event_id);
97405+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
97406
97407 event->state = PERF_EVENT_STATE_INACTIVE;
97408
97409@@ -7470,6 +7477,11 @@ SYSCALL_DEFINE5(perf_event_open,
97410 if (flags & ~PERF_FLAG_ALL)
97411 return -EINVAL;
97412
97413+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
97414+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
97415+ return -EACCES;
97416+#endif
97417+
97418 err = perf_copy_attr(attr_uptr, &attr);
97419 if (err)
97420 return err;
97421@@ -7892,10 +7904,10 @@ static void sync_child_event(struct perf_event *child_event,
97422 /*
97423 * Add back the child's count to the parent's count:
97424 */
97425- atomic64_add(child_val, &parent_event->child_count);
97426- atomic64_add(child_event->total_time_enabled,
97427+ atomic64_add_unchecked(child_val, &parent_event->child_count);
97428+ atomic64_add_unchecked(child_event->total_time_enabled,
97429 &parent_event->child_total_time_enabled);
97430- atomic64_add(child_event->total_time_running,
97431+ atomic64_add_unchecked(child_event->total_time_running,
97432 &parent_event->child_total_time_running);
97433
97434 /*
97435diff --git a/kernel/events/internal.h b/kernel/events/internal.h
97436index 569b2187..19940d9 100644
97437--- a/kernel/events/internal.h
97438+++ b/kernel/events/internal.h
97439@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
97440 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
97441 }
97442
97443-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
97444+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
97445 static inline unsigned long \
97446 func_name(struct perf_output_handle *handle, \
97447- const void *buf, unsigned long len) \
97448+ const void user *buf, unsigned long len) \
97449 { \
97450 unsigned long size, written; \
97451 \
97452@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
97453 return 0;
97454 }
97455
97456-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
97457+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
97458
97459 static inline unsigned long
97460 memcpy_skip(void *dst, const void *src, unsigned long n)
97461@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
97462 return 0;
97463 }
97464
97465-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
97466+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
97467
97468 #ifndef arch_perf_out_copy_user
97469 #define arch_perf_out_copy_user arch_perf_out_copy_user
97470@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
97471 }
97472 #endif
97473
97474-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
97475+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
97476
97477 /* Callchain handling */
97478 extern struct perf_callchain_entry *
97479diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
97480index cb346f2..e4dc317 100644
97481--- a/kernel/events/uprobes.c
97482+++ b/kernel/events/uprobes.c
97483@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
97484 {
97485 struct page *page;
97486 uprobe_opcode_t opcode;
97487- int result;
97488+ long result;
97489
97490 pagefault_disable();
97491 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
97492diff --git a/kernel/exit.c b/kernel/exit.c
97493index feff10b..f623dd5 100644
97494--- a/kernel/exit.c
97495+++ b/kernel/exit.c
97496@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
97497 struct task_struct *leader;
97498 int zap_leader;
97499 repeat:
97500+#ifdef CONFIG_NET
97501+ gr_del_task_from_ip_table(p);
97502+#endif
97503+
97504 /* don't need to get the RCU readlock here - the process is dead and
97505 * can't be modifying its own credentials. But shut RCU-lockdep up */
97506 rcu_read_lock();
97507@@ -656,6 +660,8 @@ void do_exit(long code)
97508 int group_dead;
97509 TASKS_RCU(int tasks_rcu_i);
97510
97511+ set_fs(USER_DS);
97512+
97513 profile_task_exit(tsk);
97514
97515 WARN_ON(blk_needs_flush_plug(tsk));
97516@@ -672,7 +678,6 @@ void do_exit(long code)
97517 * mm_release()->clear_child_tid() from writing to a user-controlled
97518 * kernel address.
97519 */
97520- set_fs(USER_DS);
97521
97522 ptrace_event(PTRACE_EVENT_EXIT, code);
97523
97524@@ -730,6 +735,9 @@ void do_exit(long code)
97525 tsk->exit_code = code;
97526 taskstats_exit(tsk, group_dead);
97527
97528+ gr_acl_handle_psacct(tsk, code);
97529+ gr_acl_handle_exit();
97530+
97531 exit_mm(tsk);
97532
97533 if (group_dead)
97534@@ -849,7 +857,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
97535 * Take down every thread in the group. This is called by fatal signals
97536 * as well as by sys_exit_group (below).
97537 */
97538-void
97539+__noreturn void
97540 do_group_exit(int exit_code)
97541 {
97542 struct signal_struct *sig = current->signal;
97543diff --git a/kernel/fork.c b/kernel/fork.c
97544index cf65139..704476e 100644
97545--- a/kernel/fork.c
97546+++ b/kernel/fork.c
97547@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
97548 void thread_info_cache_init(void)
97549 {
97550 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
97551- THREAD_SIZE, 0, NULL);
97552+ THREAD_SIZE, SLAB_USERCOPY, NULL);
97553 BUG_ON(thread_info_cache == NULL);
97554 }
97555 # endif
97556 #endif
97557
97558+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
97559+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
97560+ int node, void **lowmem_stack)
97561+{
97562+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
97563+ void *ret = NULL;
97564+ unsigned int i;
97565+
97566+ *lowmem_stack = alloc_thread_info_node(tsk, node);
97567+ if (*lowmem_stack == NULL)
97568+ goto out;
97569+
97570+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
97571+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
97572+
97573+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
97574+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
97575+ if (ret == NULL) {
97576+ free_thread_info(*lowmem_stack);
97577+ *lowmem_stack = NULL;
97578+ }
97579+
97580+out:
97581+ return ret;
97582+}
97583+
97584+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
97585+{
97586+ unmap_process_stacks(tsk);
97587+}
97588+#else
97589+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
97590+ int node, void **lowmem_stack)
97591+{
97592+ return alloc_thread_info_node(tsk, node);
97593+}
97594+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
97595+{
97596+ free_thread_info(ti);
97597+}
97598+#endif
97599+
97600 /* SLAB cache for signal_struct structures (tsk->signal) */
97601 static struct kmem_cache *signal_cachep;
97602
97603@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
97604 /* SLAB cache for mm_struct structures (tsk->mm) */
97605 static struct kmem_cache *mm_cachep;
97606
97607-static void account_kernel_stack(struct thread_info *ti, int account)
97608+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
97609 {
97610+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
97611+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
97612+#else
97613 struct zone *zone = page_zone(virt_to_page(ti));
97614+#endif
97615
97616 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
97617 }
97618
97619 void free_task(struct task_struct *tsk)
97620 {
97621- account_kernel_stack(tsk->stack, -1);
97622+ account_kernel_stack(tsk, tsk->stack, -1);
97623 arch_release_thread_info(tsk->stack);
97624- free_thread_info(tsk->stack);
97625+ gr_free_thread_info(tsk, tsk->stack);
97626 rt_mutex_debug_task_free(tsk);
97627 ftrace_graph_exit_task(tsk);
97628 put_seccomp_filter(tsk);
97629@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97630 {
97631 struct task_struct *tsk;
97632 struct thread_info *ti;
97633+ void *lowmem_stack;
97634 int node = tsk_fork_get_node(orig);
97635 int err;
97636
97637@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97638 if (!tsk)
97639 return NULL;
97640
97641- ti = alloc_thread_info_node(tsk, node);
97642+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
97643 if (!ti)
97644 goto free_tsk;
97645
97646@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97647 goto free_ti;
97648
97649 tsk->stack = ti;
97650+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
97651+ tsk->lowmem_stack = lowmem_stack;
97652+#endif
97653 #ifdef CONFIG_SECCOMP
97654 /*
97655 * We must handle setting up seccomp filters once we're under
97656@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97657 set_task_stack_end_magic(tsk);
97658
97659 #ifdef CONFIG_CC_STACKPROTECTOR
97660- tsk->stack_canary = get_random_int();
97661+ tsk->stack_canary = pax_get_random_long();
97662 #endif
97663
97664 /*
97665@@ -352,24 +402,89 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97666 tsk->splice_pipe = NULL;
97667 tsk->task_frag.page = NULL;
97668
97669- account_kernel_stack(ti, 1);
97670+ account_kernel_stack(tsk, ti, 1);
97671
97672 return tsk;
97673
97674 free_ti:
97675- free_thread_info(ti);
97676+ gr_free_thread_info(tsk, ti);
97677 free_tsk:
97678 free_task_struct(tsk);
97679 return NULL;
97680 }
97681
97682 #ifdef CONFIG_MMU
97683-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97684+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
97685+{
97686+ struct vm_area_struct *tmp;
97687+ unsigned long charge;
97688+ struct file *file;
97689+ int retval;
97690+
97691+ charge = 0;
97692+ if (mpnt->vm_flags & VM_ACCOUNT) {
97693+ unsigned long len = vma_pages(mpnt);
97694+
97695+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
97696+ goto fail_nomem;
97697+ charge = len;
97698+ }
97699+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97700+ if (!tmp)
97701+ goto fail_nomem;
97702+ *tmp = *mpnt;
97703+ tmp->vm_mm = mm;
97704+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
97705+ retval = vma_dup_policy(mpnt, tmp);
97706+ if (retval)
97707+ goto fail_nomem_policy;
97708+ if (anon_vma_fork(tmp, mpnt))
97709+ goto fail_nomem_anon_vma_fork;
97710+ tmp->vm_flags &= ~VM_LOCKED;
97711+ tmp->vm_next = tmp->vm_prev = NULL;
97712+ tmp->vm_mirror = NULL;
97713+ file = tmp->vm_file;
97714+ if (file) {
97715+ struct inode *inode = file_inode(file);
97716+ struct address_space *mapping = file->f_mapping;
97717+
97718+ get_file(file);
97719+ if (tmp->vm_flags & VM_DENYWRITE)
97720+ atomic_dec(&inode->i_writecount);
97721+ i_mmap_lock_write(mapping);
97722+ if (tmp->vm_flags & VM_SHARED)
97723+ atomic_inc(&mapping->i_mmap_writable);
97724+ flush_dcache_mmap_lock(mapping);
97725+ /* insert tmp into the share list, just after mpnt */
97726+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
97727+ flush_dcache_mmap_unlock(mapping);
97728+ i_mmap_unlock_write(mapping);
97729+ }
97730+
97731+ /*
97732+ * Clear hugetlb-related page reserves for children. This only
97733+ * affects MAP_PRIVATE mappings. Faults generated by the child
97734+ * are not guaranteed to succeed, even if read-only
97735+ */
97736+ if (is_vm_hugetlb_page(tmp))
97737+ reset_vma_resv_huge_pages(tmp);
97738+
97739+ return tmp;
97740+
97741+fail_nomem_anon_vma_fork:
97742+ mpol_put(vma_policy(tmp));
97743+fail_nomem_policy:
97744+ kmem_cache_free(vm_area_cachep, tmp);
97745+fail_nomem:
97746+ vm_unacct_memory(charge);
97747+ return NULL;
97748+}
97749+
97750+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97751 {
97752 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
97753 struct rb_node **rb_link, *rb_parent;
97754 int retval;
97755- unsigned long charge;
97756
97757 uprobe_start_dup_mmap();
97758 down_write(&oldmm->mmap_sem);
97759@@ -397,51 +512,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97760
97761 prev = NULL;
97762 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
97763- struct file *file;
97764-
97765 if (mpnt->vm_flags & VM_DONTCOPY) {
97766 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
97767 -vma_pages(mpnt));
97768 continue;
97769 }
97770- charge = 0;
97771- if (mpnt->vm_flags & VM_ACCOUNT) {
97772- unsigned long len = vma_pages(mpnt);
97773-
97774- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
97775- goto fail_nomem;
97776- charge = len;
97777- }
97778- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97779- if (!tmp)
97780- goto fail_nomem;
97781- *tmp = *mpnt;
97782- INIT_LIST_HEAD(&tmp->anon_vma_chain);
97783- retval = vma_dup_policy(mpnt, tmp);
97784- if (retval)
97785- goto fail_nomem_policy;
97786- tmp->vm_mm = mm;
97787- if (anon_vma_fork(tmp, mpnt))
97788- goto fail_nomem_anon_vma_fork;
97789- tmp->vm_flags &= ~VM_LOCKED;
97790- tmp->vm_next = tmp->vm_prev = NULL;
97791- file = tmp->vm_file;
97792- if (file) {
97793- struct inode *inode = file_inode(file);
97794- struct address_space *mapping = file->f_mapping;
97795-
97796- get_file(file);
97797- if (tmp->vm_flags & VM_DENYWRITE)
97798- atomic_dec(&inode->i_writecount);
97799- i_mmap_lock_write(mapping);
97800- if (tmp->vm_flags & VM_SHARED)
97801- atomic_inc(&mapping->i_mmap_writable);
97802- flush_dcache_mmap_lock(mapping);
97803- /* insert tmp into the share list, just after mpnt */
97804- vma_interval_tree_insert_after(tmp, mpnt,
97805- &mapping->i_mmap);
97806- flush_dcache_mmap_unlock(mapping);
97807- i_mmap_unlock_write(mapping);
97808+ tmp = dup_vma(mm, oldmm, mpnt);
97809+ if (!tmp) {
97810+ retval = -ENOMEM;
97811+ goto out;
97812 }
97813
97814 /*
97815@@ -473,6 +552,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97816 if (retval)
97817 goto out;
97818 }
97819+
97820+#ifdef CONFIG_PAX_SEGMEXEC
97821+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
97822+ struct vm_area_struct *mpnt_m;
97823+
97824+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
97825+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
97826+
97827+ if (!mpnt->vm_mirror)
97828+ continue;
97829+
97830+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
97831+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
97832+ mpnt->vm_mirror = mpnt_m;
97833+ } else {
97834+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
97835+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
97836+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
97837+ mpnt->vm_mirror->vm_mirror = mpnt;
97838+ }
97839+ }
97840+ BUG_ON(mpnt_m);
97841+ }
97842+#endif
97843+
97844 /* a new mm has just been created */
97845 arch_dup_mmap(oldmm, mm);
97846 retval = 0;
97847@@ -482,14 +586,6 @@ out:
97848 up_write(&oldmm->mmap_sem);
97849 uprobe_end_dup_mmap();
97850 return retval;
97851-fail_nomem_anon_vma_fork:
97852- mpol_put(vma_policy(tmp));
97853-fail_nomem_policy:
97854- kmem_cache_free(vm_area_cachep, tmp);
97855-fail_nomem:
97856- retval = -ENOMEM;
97857- vm_unacct_memory(charge);
97858- goto out;
97859 }
97860
97861 static inline int mm_alloc_pgd(struct mm_struct *mm)
97862@@ -739,8 +835,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
97863 return ERR_PTR(err);
97864
97865 mm = get_task_mm(task);
97866- if (mm && mm != current->mm &&
97867- !ptrace_may_access(task, mode)) {
97868+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
97869+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
97870 mmput(mm);
97871 mm = ERR_PTR(-EACCES);
97872 }
97873@@ -943,13 +1039,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
97874 spin_unlock(&fs->lock);
97875 return -EAGAIN;
97876 }
97877- fs->users++;
97878+ atomic_inc(&fs->users);
97879 spin_unlock(&fs->lock);
97880 return 0;
97881 }
97882 tsk->fs = copy_fs_struct(fs);
97883 if (!tsk->fs)
97884 return -ENOMEM;
97885+ /* Carry through gr_chroot_dentry and is_chrooted instead
97886+ of recomputing it here. Already copied when the task struct
97887+ is duplicated. This allows pivot_root to not be treated as
97888+ a chroot
97889+ */
97890+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
97891+
97892 return 0;
97893 }
97894
97895@@ -1187,7 +1290,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
97896 * parts of the process environment (as per the clone
97897 * flags). The actual kick-off is left to the caller.
97898 */
97899-static struct task_struct *copy_process(unsigned long clone_flags,
97900+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
97901 unsigned long stack_start,
97902 unsigned long stack_size,
97903 int __user *child_tidptr,
97904@@ -1258,6 +1361,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
97905 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
97906 #endif
97907 retval = -EAGAIN;
97908+
97909+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
97910+
97911 if (atomic_read(&p->real_cred->user->processes) >=
97912 task_rlimit(p, RLIMIT_NPROC)) {
97913 if (p->real_cred->user != INIT_USER &&
97914@@ -1507,6 +1613,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
97915 goto bad_fork_free_pid;
97916 }
97917
97918+ /* synchronizes with gr_set_acls()
97919+ we need to call this past the point of no return for fork()
97920+ */
97921+ gr_copy_label(p);
97922+
97923 if (likely(p->pid)) {
97924 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
97925
97926@@ -1597,6 +1708,8 @@ bad_fork_cleanup_count:
97927 bad_fork_free:
97928 free_task(p);
97929 fork_out:
97930+ gr_log_forkfail(retval);
97931+
97932 return ERR_PTR(retval);
97933 }
97934
97935@@ -1658,6 +1771,7 @@ long do_fork(unsigned long clone_flags,
97936
97937 p = copy_process(clone_flags, stack_start, stack_size,
97938 child_tidptr, NULL, trace);
97939+ add_latent_entropy();
97940 /*
97941 * Do this prior waking up the new thread - the thread pointer
97942 * might get invalid after that point, if the thread exits quickly.
97943@@ -1674,6 +1788,8 @@ long do_fork(unsigned long clone_flags,
97944 if (clone_flags & CLONE_PARENT_SETTID)
97945 put_user(nr, parent_tidptr);
97946
97947+ gr_handle_brute_check();
97948+
97949 if (clone_flags & CLONE_VFORK) {
97950 p->vfork_done = &vfork;
97951 init_completion(&vfork);
97952@@ -1792,7 +1908,7 @@ void __init proc_caches_init(void)
97953 mm_cachep = kmem_cache_create("mm_struct",
97954 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
97955 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
97956- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
97957+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
97958 mmap_init();
97959 nsproxy_cache_init();
97960 }
97961@@ -1832,7 +1948,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
97962 return 0;
97963
97964 /* don't need lock here; in the worst case we'll do useless copy */
97965- if (fs->users == 1)
97966+ if (atomic_read(&fs->users) == 1)
97967 return 0;
97968
97969 *new_fsp = copy_fs_struct(fs);
97970@@ -1944,7 +2060,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
97971 fs = current->fs;
97972 spin_lock(&fs->lock);
97973 current->fs = new_fs;
97974- if (--fs->users)
97975+ gr_set_chroot_entries(current, &current->fs->root);
97976+ if (atomic_dec_return(&fs->users))
97977 new_fs = NULL;
97978 else
97979 new_fs = fs;
97980diff --git a/kernel/futex.c b/kernel/futex.c
97981index 2a5e383..878bac6 100644
97982--- a/kernel/futex.c
97983+++ b/kernel/futex.c
97984@@ -201,7 +201,7 @@ struct futex_pi_state {
97985 atomic_t refcount;
97986
97987 union futex_key key;
97988-};
97989+} __randomize_layout;
97990
97991 /**
97992 * struct futex_q - The hashed futex queue entry, one per waiting task
97993@@ -235,7 +235,7 @@ struct futex_q {
97994 struct rt_mutex_waiter *rt_waiter;
97995 union futex_key *requeue_pi_key;
97996 u32 bitset;
97997-};
97998+} __randomize_layout;
97999
98000 static const struct futex_q futex_q_init = {
98001 /* list gets initialized in queue_me()*/
98002@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
98003 struct page *page, *page_head;
98004 int err, ro = 0;
98005
98006+#ifdef CONFIG_PAX_SEGMEXEC
98007+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
98008+ return -EFAULT;
98009+#endif
98010+
98011 /*
98012 * The futex address must be "naturally" aligned.
98013 */
98014@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
98015
98016 static int get_futex_value_locked(u32 *dest, u32 __user *from)
98017 {
98018- int ret;
98019+ unsigned long ret;
98020
98021 pagefault_disable();
98022 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
98023@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
98024 {
98025 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
98026 u32 curval;
98027+ mm_segment_t oldfs;
98028
98029 /*
98030 * This will fail and we want it. Some arch implementations do
98031@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
98032 * implementation, the non-functional ones will return
98033 * -ENOSYS.
98034 */
98035+ oldfs = get_fs();
98036+ set_fs(USER_DS);
98037 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
98038 futex_cmpxchg_enabled = 1;
98039+ set_fs(oldfs);
98040 #endif
98041 }
98042
98043diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
98044index 55c8c93..9ba7ad6 100644
98045--- a/kernel/futex_compat.c
98046+++ b/kernel/futex_compat.c
98047@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
98048 return 0;
98049 }
98050
98051-static void __user *futex_uaddr(struct robust_list __user *entry,
98052+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
98053 compat_long_t futex_offset)
98054 {
98055 compat_uptr_t base = ptr_to_compat(entry);
98056diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
98057index b358a80..fc25240 100644
98058--- a/kernel/gcov/base.c
98059+++ b/kernel/gcov/base.c
98060@@ -114,11 +114,6 @@ void gcov_enable_events(void)
98061 }
98062
98063 #ifdef CONFIG_MODULES
98064-static inline int within(void *addr, void *start, unsigned long size)
98065-{
98066- return ((addr >= start) && (addr < start + size));
98067-}
98068-
98069 /* Update list and generate events when modules are unloaded. */
98070 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
98071 void *data)
98072@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
98073
98074 /* Remove entries located in module from linked list. */
98075 while ((info = gcov_info_next(info))) {
98076- if (within(info, mod->module_core, mod->core_size)) {
98077+ if (within_module_core_rw((unsigned long)info, mod)) {
98078 gcov_info_unlink(prev, info);
98079 if (gcov_events_enabled)
98080 gcov_event(GCOV_REMOVE, info);
98081diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
98082index 886d09e..c7ff4e5 100644
98083--- a/kernel/irq/manage.c
98084+++ b/kernel/irq/manage.c
98085@@ -874,7 +874,7 @@ static int irq_thread(void *data)
98086
98087 action_ret = handler_fn(desc, action);
98088 if (action_ret == IRQ_HANDLED)
98089- atomic_inc(&desc->threads_handled);
98090+ atomic_inc_unchecked(&desc->threads_handled);
98091
98092 wake_threads_waitq(desc);
98093 }
98094diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
98095index e2514b0..de3dfe0 100644
98096--- a/kernel/irq/spurious.c
98097+++ b/kernel/irq/spurious.c
98098@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
98099 * count. We just care about the count being
98100 * different than the one we saw before.
98101 */
98102- handled = atomic_read(&desc->threads_handled);
98103+ handled = atomic_read_unchecked(&desc->threads_handled);
98104 handled |= SPURIOUS_DEFERRED;
98105 if (handled != desc->threads_handled_last) {
98106 action_ret = IRQ_HANDLED;
98107diff --git a/kernel/jump_label.c b/kernel/jump_label.c
98108index 9019f15..9a3c42e 100644
98109--- a/kernel/jump_label.c
98110+++ b/kernel/jump_label.c
98111@@ -14,6 +14,7 @@
98112 #include <linux/err.h>
98113 #include <linux/static_key.h>
98114 #include <linux/jump_label_ratelimit.h>
98115+#include <linux/mm.h>
98116
98117 #ifdef HAVE_JUMP_LABEL
98118
98119@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
98120
98121 size = (((unsigned long)stop - (unsigned long)start)
98122 / sizeof(struct jump_entry));
98123+ pax_open_kernel();
98124 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
98125+ pax_close_kernel();
98126 }
98127
98128 static void jump_label_update(struct static_key *key, int enable);
98129@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
98130 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
98131 struct jump_entry *iter;
98132
98133+ pax_open_kernel();
98134 for (iter = iter_start; iter < iter_stop; iter++) {
98135 if (within_module_init(iter->code, mod))
98136 iter->code = 0;
98137 }
98138+ pax_close_kernel();
98139 }
98140
98141 static int
98142diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
98143index 5c5987f..bc502b0 100644
98144--- a/kernel/kallsyms.c
98145+++ b/kernel/kallsyms.c
98146@@ -11,6 +11,9 @@
98147 * Changed the compression method from stem compression to "table lookup"
98148 * compression (see scripts/kallsyms.c for a more complete description)
98149 */
98150+#ifdef CONFIG_GRKERNSEC_HIDESYM
98151+#define __INCLUDED_BY_HIDESYM 1
98152+#endif
98153 #include <linux/kallsyms.h>
98154 #include <linux/module.h>
98155 #include <linux/init.h>
98156@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
98157
98158 static inline int is_kernel_inittext(unsigned long addr)
98159 {
98160+ if (system_state != SYSTEM_BOOTING)
98161+ return 0;
98162+
98163 if (addr >= (unsigned long)_sinittext
98164 && addr <= (unsigned long)_einittext)
98165 return 1;
98166 return 0;
98167 }
98168
98169+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
98170+#ifdef CONFIG_MODULES
98171+static inline int is_module_text(unsigned long addr)
98172+{
98173+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
98174+ return 1;
98175+
98176+ addr = ktla_ktva(addr);
98177+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
98178+}
98179+#else
98180+static inline int is_module_text(unsigned long addr)
98181+{
98182+ return 0;
98183+}
98184+#endif
98185+#endif
98186+
98187 static inline int is_kernel_text(unsigned long addr)
98188 {
98189 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
98190@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
98191
98192 static inline int is_kernel(unsigned long addr)
98193 {
98194+
98195+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
98196+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
98197+ return 1;
98198+
98199+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
98200+#else
98201 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
98202+#endif
98203+
98204 return 1;
98205 return in_gate_area_no_mm(addr);
98206 }
98207
98208 static int is_ksym_addr(unsigned long addr)
98209 {
98210+
98211+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
98212+ if (is_module_text(addr))
98213+ return 0;
98214+#endif
98215+
98216 if (all_var)
98217 return is_kernel(addr);
98218
98219@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
98220
98221 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
98222 {
98223- iter->name[0] = '\0';
98224 iter->nameoff = get_symbol_offset(new_pos);
98225 iter->pos = new_pos;
98226 }
98227@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
98228 {
98229 struct kallsym_iter *iter = m->private;
98230
98231+#ifdef CONFIG_GRKERNSEC_HIDESYM
98232+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
98233+ return 0;
98234+#endif
98235+
98236 /* Some debugging symbols have no name. Ignore them. */
98237 if (!iter->name[0])
98238 return 0;
98239@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
98240 */
98241 type = iter->exported ? toupper(iter->type) :
98242 tolower(iter->type);
98243+
98244 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
98245 type, iter->name, iter->module_name);
98246 } else
98247diff --git a/kernel/kcmp.c b/kernel/kcmp.c
98248index 0aa69ea..a7fcafb 100644
98249--- a/kernel/kcmp.c
98250+++ b/kernel/kcmp.c
98251@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
98252 struct task_struct *task1, *task2;
98253 int ret;
98254
98255+#ifdef CONFIG_GRKERNSEC
98256+ return -ENOSYS;
98257+#endif
98258+
98259 rcu_read_lock();
98260
98261 /*
98262diff --git a/kernel/kexec.c b/kernel/kexec.c
98263index 38c25b1..12b3f69 100644
98264--- a/kernel/kexec.c
98265+++ b/kernel/kexec.c
98266@@ -1348,7 +1348,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
98267 compat_ulong_t, flags)
98268 {
98269 struct compat_kexec_segment in;
98270- struct kexec_segment out, __user *ksegments;
98271+ struct kexec_segment out;
98272+ struct kexec_segment __user *ksegments;
98273 unsigned long i, result;
98274
98275 /* Don't allow clients that don't understand the native
98276diff --git a/kernel/kmod.c b/kernel/kmod.c
98277index 2777f40..a689506 100644
98278--- a/kernel/kmod.c
98279+++ b/kernel/kmod.c
98280@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
98281 kfree(info->argv);
98282 }
98283
98284-static int call_modprobe(char *module_name, int wait)
98285+static int call_modprobe(char *module_name, char *module_param, int wait)
98286 {
98287 struct subprocess_info *info;
98288 static char *envp[] = {
98289@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
98290 NULL
98291 };
98292
98293- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
98294+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
98295 if (!argv)
98296 goto out;
98297
98298@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
98299 argv[1] = "-q";
98300 argv[2] = "--";
98301 argv[3] = module_name; /* check free_modprobe_argv() */
98302- argv[4] = NULL;
98303+ argv[4] = module_param;
98304+ argv[5] = NULL;
98305
98306 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
98307 NULL, free_modprobe_argv, NULL);
98308@@ -122,9 +123,8 @@ out:
98309 * If module auto-loading support is disabled then this function
98310 * becomes a no-operation.
98311 */
98312-int __request_module(bool wait, const char *fmt, ...)
98313+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
98314 {
98315- va_list args;
98316 char module_name[MODULE_NAME_LEN];
98317 unsigned int max_modprobes;
98318 int ret;
98319@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
98320 if (!modprobe_path[0])
98321 return 0;
98322
98323- va_start(args, fmt);
98324- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
98325- va_end(args);
98326+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
98327 if (ret >= MODULE_NAME_LEN)
98328 return -ENAMETOOLONG;
98329
98330@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
98331 if (ret)
98332 return ret;
98333
98334+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98335+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
98336+ /* hack to workaround consolekit/udisks stupidity */
98337+ read_lock(&tasklist_lock);
98338+ if (!strcmp(current->comm, "mount") &&
98339+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
98340+ read_unlock(&tasklist_lock);
98341+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
98342+ return -EPERM;
98343+ }
98344+ read_unlock(&tasklist_lock);
98345+ }
98346+#endif
98347+
98348 /* If modprobe needs a service that is in a module, we get a recursive
98349 * loop. Limit the number of running kmod threads to max_threads/2 or
98350 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
98351@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
98352
98353 trace_module_request(module_name, wait, _RET_IP_);
98354
98355- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
98356+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
98357
98358 atomic_dec(&kmod_concurrent);
98359 return ret;
98360 }
98361+
98362+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
98363+{
98364+ va_list args;
98365+ int ret;
98366+
98367+ va_start(args, fmt);
98368+ ret = ____request_module(wait, module_param, fmt, args);
98369+ va_end(args);
98370+
98371+ return ret;
98372+}
98373+
98374+int __request_module(bool wait, const char *fmt, ...)
98375+{
98376+ va_list args;
98377+ int ret;
98378+
98379+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98380+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
98381+ char module_param[MODULE_NAME_LEN];
98382+
98383+ memset(module_param, 0, sizeof(module_param));
98384+
98385+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
98386+
98387+ va_start(args, fmt);
98388+ ret = ____request_module(wait, module_param, fmt, args);
98389+ va_end(args);
98390+
98391+ return ret;
98392+ }
98393+#endif
98394+
98395+ va_start(args, fmt);
98396+ ret = ____request_module(wait, NULL, fmt, args);
98397+ va_end(args);
98398+
98399+ return ret;
98400+}
98401+
98402 EXPORT_SYMBOL(__request_module);
98403 #endif /* CONFIG_MODULES */
98404
98405 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
98406 {
98407+#ifdef CONFIG_GRKERNSEC
98408+ kfree(info->path);
98409+ info->path = info->origpath;
98410+#endif
98411 if (info->cleanup)
98412 (*info->cleanup)(info);
98413 kfree(info);
98414@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
98415 */
98416 set_user_nice(current, 0);
98417
98418+#ifdef CONFIG_GRKERNSEC
98419+ /* this is race-free as far as userland is concerned as we copied
98420+ out the path to be used prior to this point and are now operating
98421+ on that copy
98422+ */
98423+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
98424+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
98425+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
98426+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
98427+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
98428+ retval = -EPERM;
98429+ goto out;
98430+ }
98431+#endif
98432+
98433 retval = -ENOMEM;
98434 new = prepare_kernel_cred(current);
98435 if (!new)
98436@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
98437 commit_creds(new);
98438
98439 retval = do_execve(getname_kernel(sub_info->path),
98440- (const char __user *const __user *)sub_info->argv,
98441- (const char __user *const __user *)sub_info->envp);
98442+ (const char __user *const __force_user *)sub_info->argv,
98443+ (const char __user *const __force_user *)sub_info->envp);
98444 out:
98445 sub_info->retval = retval;
98446 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
98447@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
98448 *
98449 * Thus the __user pointer cast is valid here.
98450 */
98451- sys_wait4(pid, (int __user *)&ret, 0, NULL);
98452+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
98453
98454 /*
98455 * If ret is 0, either ____call_usermodehelper failed and the
98456@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
98457 goto out;
98458
98459 INIT_WORK(&sub_info->work, __call_usermodehelper);
98460+#ifdef CONFIG_GRKERNSEC
98461+ sub_info->origpath = path;
98462+ sub_info->path = kstrdup(path, gfp_mask);
98463+#else
98464 sub_info->path = path;
98465+#endif
98466 sub_info->argv = argv;
98467 sub_info->envp = envp;
98468
98469@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
98470 static int proc_cap_handler(struct ctl_table *table, int write,
98471 void __user *buffer, size_t *lenp, loff_t *ppos)
98472 {
98473- struct ctl_table t;
98474+ ctl_table_no_const t;
98475 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
98476 kernel_cap_t new_cap;
98477 int err, i;
98478diff --git a/kernel/kprobes.c b/kernel/kprobes.c
98479index c90e417..e6c515d 100644
98480--- a/kernel/kprobes.c
98481+++ b/kernel/kprobes.c
98482@@ -31,6 +31,9 @@
98483 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
98484 * <prasanna@in.ibm.com> added function-return probes.
98485 */
98486+#ifdef CONFIG_GRKERNSEC_HIDESYM
98487+#define __INCLUDED_BY_HIDESYM 1
98488+#endif
98489 #include <linux/kprobes.h>
98490 #include <linux/hash.h>
98491 #include <linux/init.h>
98492@@ -122,12 +125,12 @@ enum kprobe_slot_state {
98493
98494 static void *alloc_insn_page(void)
98495 {
98496- return module_alloc(PAGE_SIZE);
98497+ return module_alloc_exec(PAGE_SIZE);
98498 }
98499
98500 static void free_insn_page(void *page)
98501 {
98502- module_memfree(page);
98503+ module_memfree_exec(page);
98504 }
98505
98506 struct kprobe_insn_cache kprobe_insn_slots = {
98507@@ -2198,11 +2201,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
98508 kprobe_type = "k";
98509
98510 if (sym)
98511- seq_printf(pi, "%p %s %s+0x%x %s ",
98512+ seq_printf(pi, "%pK %s %s+0x%x %s ",
98513 p->addr, kprobe_type, sym, offset,
98514 (modname ? modname : " "));
98515 else
98516- seq_printf(pi, "%p %s %p ",
98517+ seq_printf(pi, "%pK %s %pK ",
98518 p->addr, kprobe_type, p->addr);
98519
98520 if (!pp)
98521diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
98522index 6683cce..daf8999 100644
98523--- a/kernel/ksysfs.c
98524+++ b/kernel/ksysfs.c
98525@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
98526 {
98527 if (count+1 > UEVENT_HELPER_PATH_LEN)
98528 return -ENOENT;
98529+ if (!capable(CAP_SYS_ADMIN))
98530+ return -EPERM;
98531 memcpy(uevent_helper, buf, count);
98532 uevent_helper[count] = '\0';
98533 if (count && uevent_helper[count-1] == '\n')
98534@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
98535 return count;
98536 }
98537
98538-static struct bin_attribute notes_attr = {
98539+static bin_attribute_no_const notes_attr __read_only = {
98540 .attr = {
98541 .name = "notes",
98542 .mode = S_IRUGO,
98543diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
98544index ba77ab5..d6a3e20 100644
98545--- a/kernel/locking/lockdep.c
98546+++ b/kernel/locking/lockdep.c
98547@@ -599,6 +599,10 @@ static int static_obj(void *obj)
98548 end = (unsigned long) &_end,
98549 addr = (unsigned long) obj;
98550
98551+#ifdef CONFIG_PAX_KERNEXEC
98552+ start = ktla_ktva(start);
98553+#endif
98554+
98555 /*
98556 * static variable?
98557 */
98558@@ -743,6 +747,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
98559 if (!static_obj(lock->key)) {
98560 debug_locks_off();
98561 printk("INFO: trying to register non-static key.\n");
98562+ printk("lock:%pS key:%pS.\n", lock, lock->key);
98563 printk("the code is fine but needs lockdep annotation.\n");
98564 printk("turning off the locking correctness validator.\n");
98565 dump_stack();
98566@@ -3088,7 +3093,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
98567 if (!class)
98568 return 0;
98569 }
98570- atomic_inc((atomic_t *)&class->ops);
98571+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
98572 if (very_verbose(class)) {
98573 printk("\nacquire class [%p] %s", class->key, class->name);
98574 if (class->name_version > 1)
98575diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
98576index ef43ac4..2720dfa 100644
98577--- a/kernel/locking/lockdep_proc.c
98578+++ b/kernel/locking/lockdep_proc.c
98579@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
98580 return 0;
98581 }
98582
98583- seq_printf(m, "%p", class->key);
98584+ seq_printf(m, "%pK", class->key);
98585 #ifdef CONFIG_DEBUG_LOCKDEP
98586 seq_printf(m, " OPS:%8ld", class->ops);
98587 #endif
98588@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
98589
98590 list_for_each_entry(entry, &class->locks_after, entry) {
98591 if (entry->distance == 1) {
98592- seq_printf(m, " -> [%p] ", entry->class->key);
98593+ seq_printf(m, " -> [%pK] ", entry->class->key);
98594 print_name(m, entry->class);
98595 seq_puts(m, "\n");
98596 }
98597@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
98598 if (!class->key)
98599 continue;
98600
98601- seq_printf(m, "[%p] ", class->key);
98602+ seq_printf(m, "[%pK] ", class->key);
98603 print_name(m, class);
98604 seq_puts(m, "\n");
98605 }
98606@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
98607 if (!i)
98608 seq_line(m, '-', 40-namelen, namelen);
98609
98610- snprintf(ip, sizeof(ip), "[<%p>]",
98611+ snprintf(ip, sizeof(ip), "[<%pK>]",
98612 (void *)class->contention_point[i]);
98613 seq_printf(m, "%40s %14lu %29s %pS\n",
98614 name, stats->contention_point[i],
98615@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
98616 if (!i)
98617 seq_line(m, '-', 40-namelen, namelen);
98618
98619- snprintf(ip, sizeof(ip), "[<%p>]",
98620+ snprintf(ip, sizeof(ip), "[<%pK>]",
98621 (void *)class->contending_point[i]);
98622 seq_printf(m, "%40s %14lu %29s %pS\n",
98623 name, stats->contending_point[i],
98624diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
98625index d1fe2ba..180cd65e 100644
98626--- a/kernel/locking/mcs_spinlock.h
98627+++ b/kernel/locking/mcs_spinlock.h
98628@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
98629 */
98630 return;
98631 }
98632- ACCESS_ONCE(prev->next) = node;
98633+ ACCESS_ONCE_RW(prev->next) = node;
98634
98635 /* Wait until the lock holder passes the lock down. */
98636 arch_mcs_spin_lock_contended(&node->locked);
98637diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
98638index 3ef3736..9c951fa 100644
98639--- a/kernel/locking/mutex-debug.c
98640+++ b/kernel/locking/mutex-debug.c
98641@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
98642 }
98643
98644 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
98645- struct thread_info *ti)
98646+ struct task_struct *task)
98647 {
98648 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
98649
98650 /* Mark the current thread as blocked on the lock: */
98651- ti->task->blocked_on = waiter;
98652+ task->blocked_on = waiter;
98653 }
98654
98655 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
98656- struct thread_info *ti)
98657+ struct task_struct *task)
98658 {
98659 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
98660- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
98661- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
98662- ti->task->blocked_on = NULL;
98663+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
98664+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
98665+ task->blocked_on = NULL;
98666
98667 list_del_init(&waiter->list);
98668 waiter->task = NULL;
98669diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
98670index 0799fd3..d06ae3b 100644
98671--- a/kernel/locking/mutex-debug.h
98672+++ b/kernel/locking/mutex-debug.h
98673@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
98674 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
98675 extern void debug_mutex_add_waiter(struct mutex *lock,
98676 struct mutex_waiter *waiter,
98677- struct thread_info *ti);
98678+ struct task_struct *task);
98679 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
98680- struct thread_info *ti);
98681+ struct task_struct *task);
98682 extern void debug_mutex_unlock(struct mutex *lock);
98683 extern void debug_mutex_init(struct mutex *lock, const char *name,
98684 struct lock_class_key *key);
98685diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
98686index 94674e5..de4966f 100644
98687--- a/kernel/locking/mutex.c
98688+++ b/kernel/locking/mutex.c
98689@@ -542,7 +542,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
98690 goto skip_wait;
98691
98692 debug_mutex_lock_common(lock, &waiter);
98693- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
98694+ debug_mutex_add_waiter(lock, &waiter, task);
98695
98696 /* add waiting tasks to the end of the waitqueue (FIFO): */
98697 list_add_tail(&waiter.list, &lock->wait_list);
98698@@ -589,7 +589,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
98699 }
98700 __set_task_state(task, TASK_RUNNING);
98701
98702- mutex_remove_waiter(lock, &waiter, current_thread_info());
98703+ mutex_remove_waiter(lock, &waiter, task);
98704 /* set it to 0 if there are no waiters left: */
98705 if (likely(list_empty(&lock->wait_list)))
98706 atomic_set(&lock->count, 0);
98707@@ -610,7 +610,7 @@ skip_wait:
98708 return 0;
98709
98710 err:
98711- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
98712+ mutex_remove_waiter(lock, &waiter, task);
98713 spin_unlock_mutex(&lock->wait_lock, flags);
98714 debug_mutex_free_waiter(&waiter);
98715 mutex_release(&lock->dep_map, 1, ip);
98716diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
98717index c112d00..1946ad9 100644
98718--- a/kernel/locking/osq_lock.c
98719+++ b/kernel/locking/osq_lock.c
98720@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
98721
98722 prev = decode_cpu(old);
98723 node->prev = prev;
98724- ACCESS_ONCE(prev->next) = node;
98725+ ACCESS_ONCE_RW(prev->next) = node;
98726
98727 /*
98728 * Normally @prev is untouchable after the above store; because at that
98729@@ -170,8 +170,8 @@ unqueue:
98730 * it will wait in Step-A.
98731 */
98732
98733- ACCESS_ONCE(next->prev) = prev;
98734- ACCESS_ONCE(prev->next) = next;
98735+ ACCESS_ONCE_RW(next->prev) = prev;
98736+ ACCESS_ONCE_RW(prev->next) = next;
98737
98738 return false;
98739 }
98740@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
98741 node = this_cpu_ptr(&osq_node);
98742 next = xchg(&node->next, NULL);
98743 if (next) {
98744- ACCESS_ONCE(next->locked) = 1;
98745+ ACCESS_ONCE_RW(next->locked) = 1;
98746 return;
98747 }
98748
98749 next = osq_wait_next(lock, node, NULL);
98750 if (next)
98751- ACCESS_ONCE(next->locked) = 1;
98752+ ACCESS_ONCE_RW(next->locked) = 1;
98753 }
98754diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
98755index 1d96dd0..994ff19 100644
98756--- a/kernel/locking/rtmutex-tester.c
98757+++ b/kernel/locking/rtmutex-tester.c
98758@@ -22,7 +22,7 @@
98759 #define MAX_RT_TEST_MUTEXES 8
98760
98761 static spinlock_t rttest_lock;
98762-static atomic_t rttest_event;
98763+static atomic_unchecked_t rttest_event;
98764
98765 struct test_thread_data {
98766 int opcode;
98767@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98768
98769 case RTTEST_LOCKCONT:
98770 td->mutexes[td->opdata] = 1;
98771- td->event = atomic_add_return(1, &rttest_event);
98772+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98773 return 0;
98774
98775 case RTTEST_RESET:
98776@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98777 return 0;
98778
98779 case RTTEST_RESETEVENT:
98780- atomic_set(&rttest_event, 0);
98781+ atomic_set_unchecked(&rttest_event, 0);
98782 return 0;
98783
98784 default:
98785@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98786 return ret;
98787
98788 td->mutexes[id] = 1;
98789- td->event = atomic_add_return(1, &rttest_event);
98790+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98791 rt_mutex_lock(&mutexes[id]);
98792- td->event = atomic_add_return(1, &rttest_event);
98793+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98794 td->mutexes[id] = 4;
98795 return 0;
98796
98797@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98798 return ret;
98799
98800 td->mutexes[id] = 1;
98801- td->event = atomic_add_return(1, &rttest_event);
98802+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98803 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
98804- td->event = atomic_add_return(1, &rttest_event);
98805+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98806 td->mutexes[id] = ret ? 0 : 4;
98807 return ret ? -EINTR : 0;
98808
98809@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98810 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
98811 return ret;
98812
98813- td->event = atomic_add_return(1, &rttest_event);
98814+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98815 rt_mutex_unlock(&mutexes[id]);
98816- td->event = atomic_add_return(1, &rttest_event);
98817+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98818 td->mutexes[id] = 0;
98819 return 0;
98820
98821@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
98822 break;
98823
98824 td->mutexes[dat] = 2;
98825- td->event = atomic_add_return(1, &rttest_event);
98826+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98827 break;
98828
98829 default:
98830@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
98831 return;
98832
98833 td->mutexes[dat] = 3;
98834- td->event = atomic_add_return(1, &rttest_event);
98835+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98836 break;
98837
98838 case RTTEST_LOCKNOWAIT:
98839@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
98840 return;
98841
98842 td->mutexes[dat] = 1;
98843- td->event = atomic_add_return(1, &rttest_event);
98844+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98845 return;
98846
98847 default:
98848diff --git a/kernel/module.c b/kernel/module.c
98849index ec53f59..67d9655 100644
98850--- a/kernel/module.c
98851+++ b/kernel/module.c
98852@@ -59,6 +59,7 @@
98853 #include <linux/jump_label.h>
98854 #include <linux/pfn.h>
98855 #include <linux/bsearch.h>
98856+#include <linux/grsecurity.h>
98857 #include <uapi/linux/module.h>
98858 #include "module-internal.h"
98859
98860@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
98861
98862 /* Bounds of module allocation, for speeding __module_address.
98863 * Protected by module_mutex. */
98864-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
98865+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
98866+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
98867
98868 int register_module_notifier(struct notifier_block *nb)
98869 {
98870@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
98871 return true;
98872
98873 list_for_each_entry_rcu(mod, &modules, list) {
98874- struct symsearch arr[] = {
98875+ struct symsearch modarr[] = {
98876 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
98877 NOT_GPL_ONLY, false },
98878 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
98879@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
98880 if (mod->state == MODULE_STATE_UNFORMED)
98881 continue;
98882
98883- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
98884+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
98885 return true;
98886 }
98887 return false;
98888@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
98889 if (!pcpusec->sh_size)
98890 return 0;
98891
98892- if (align > PAGE_SIZE) {
98893+ if (align-1 >= PAGE_SIZE) {
98894 pr_warn("%s: per-cpu alignment %li > %li\n",
98895 mod->name, align, PAGE_SIZE);
98896 align = PAGE_SIZE;
98897@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
98898 static ssize_t show_coresize(struct module_attribute *mattr,
98899 struct module_kobject *mk, char *buffer)
98900 {
98901- return sprintf(buffer, "%u\n", mk->mod->core_size);
98902+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
98903 }
98904
98905 static struct module_attribute modinfo_coresize =
98906@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
98907 static ssize_t show_initsize(struct module_attribute *mattr,
98908 struct module_kobject *mk, char *buffer)
98909 {
98910- return sprintf(buffer, "%u\n", mk->mod->init_size);
98911+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
98912 }
98913
98914 static struct module_attribute modinfo_initsize =
98915@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
98916 goto bad_version;
98917 }
98918
98919+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
98920+ /*
98921+ * avoid potentially printing jibberish on attempted load
98922+ * of a module randomized with a different seed
98923+ */
98924+ pr_warn("no symbol version for %s\n", symname);
98925+#else
98926 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
98927+#endif
98928 return 0;
98929
98930 bad_version:
98931+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
98932+ /*
98933+ * avoid potentially printing jibberish on attempted load
98934+ * of a module randomized with a different seed
98935+ */
98936+ pr_warn("attempted module disagrees about version of symbol %s\n",
98937+ symname);
98938+#else
98939 pr_warn("%s: disagrees about version of symbol %s\n",
98940 mod->name, symname);
98941+#endif
98942 return 0;
98943 }
98944
98945@@ -1281,7 +1300,7 @@ resolve_symbol_wait(struct module *mod,
98946 */
98947 #ifdef CONFIG_SYSFS
98948
98949-#ifdef CONFIG_KALLSYMS
98950+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
98951 static inline bool sect_empty(const Elf_Shdr *sect)
98952 {
98953 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
98954@@ -1419,7 +1438,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
98955 {
98956 unsigned int notes, loaded, i;
98957 struct module_notes_attrs *notes_attrs;
98958- struct bin_attribute *nattr;
98959+ bin_attribute_no_const *nattr;
98960
98961 /* failed to create section attributes, so can't create notes */
98962 if (!mod->sect_attrs)
98963@@ -1531,7 +1550,7 @@ static void del_usage_links(struct module *mod)
98964 static int module_add_modinfo_attrs(struct module *mod)
98965 {
98966 struct module_attribute *attr;
98967- struct module_attribute *temp_attr;
98968+ module_attribute_no_const *temp_attr;
98969 int error = 0;
98970 int i;
98971
98972@@ -1741,21 +1760,21 @@ static void set_section_ro_nx(void *base,
98973
98974 static void unset_module_core_ro_nx(struct module *mod)
98975 {
98976- set_page_attributes(mod->module_core + mod->core_text_size,
98977- mod->module_core + mod->core_size,
98978+ set_page_attributes(mod->module_core_rw,
98979+ mod->module_core_rw + mod->core_size_rw,
98980 set_memory_x);
98981- set_page_attributes(mod->module_core,
98982- mod->module_core + mod->core_ro_size,
98983+ set_page_attributes(mod->module_core_rx,
98984+ mod->module_core_rx + mod->core_size_rx,
98985 set_memory_rw);
98986 }
98987
98988 static void unset_module_init_ro_nx(struct module *mod)
98989 {
98990- set_page_attributes(mod->module_init + mod->init_text_size,
98991- mod->module_init + mod->init_size,
98992+ set_page_attributes(mod->module_init_rw,
98993+ mod->module_init_rw + mod->init_size_rw,
98994 set_memory_x);
98995- set_page_attributes(mod->module_init,
98996- mod->module_init + mod->init_ro_size,
98997+ set_page_attributes(mod->module_init_rx,
98998+ mod->module_init_rx + mod->init_size_rx,
98999 set_memory_rw);
99000 }
99001
99002@@ -1768,14 +1787,14 @@ void set_all_modules_text_rw(void)
99003 list_for_each_entry_rcu(mod, &modules, list) {
99004 if (mod->state == MODULE_STATE_UNFORMED)
99005 continue;
99006- if ((mod->module_core) && (mod->core_text_size)) {
99007- set_page_attributes(mod->module_core,
99008- mod->module_core + mod->core_text_size,
99009+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
99010+ set_page_attributes(mod->module_core_rx,
99011+ mod->module_core_rx + mod->core_size_rx,
99012 set_memory_rw);
99013 }
99014- if ((mod->module_init) && (mod->init_text_size)) {
99015- set_page_attributes(mod->module_init,
99016- mod->module_init + mod->init_text_size,
99017+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
99018+ set_page_attributes(mod->module_init_rx,
99019+ mod->module_init_rx + mod->init_size_rx,
99020 set_memory_rw);
99021 }
99022 }
99023@@ -1791,14 +1810,14 @@ void set_all_modules_text_ro(void)
99024 list_for_each_entry_rcu(mod, &modules, list) {
99025 if (mod->state == MODULE_STATE_UNFORMED)
99026 continue;
99027- if ((mod->module_core) && (mod->core_text_size)) {
99028- set_page_attributes(mod->module_core,
99029- mod->module_core + mod->core_text_size,
99030+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
99031+ set_page_attributes(mod->module_core_rx,
99032+ mod->module_core_rx + mod->core_size_rx,
99033 set_memory_ro);
99034 }
99035- if ((mod->module_init) && (mod->init_text_size)) {
99036- set_page_attributes(mod->module_init,
99037- mod->module_init + mod->init_text_size,
99038+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
99039+ set_page_attributes(mod->module_init_rx,
99040+ mod->module_init_rx + mod->init_size_rx,
99041 set_memory_ro);
99042 }
99043 }
99044@@ -1807,7 +1826,15 @@ void set_all_modules_text_ro(void)
99045 #else
99046 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
99047 static void unset_module_core_ro_nx(struct module *mod) { }
99048-static void unset_module_init_ro_nx(struct module *mod) { }
99049+static void unset_module_init_ro_nx(struct module *mod)
99050+{
99051+
99052+#ifdef CONFIG_PAX_KERNEXEC
99053+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
99054+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
99055+#endif
99056+
99057+}
99058 #endif
99059
99060 void __weak module_memfree(void *module_region)
99061@@ -1861,16 +1888,19 @@ static void free_module(struct module *mod)
99062 /* This may be NULL, but that's OK */
99063 unset_module_init_ro_nx(mod);
99064 module_arch_freeing_init(mod);
99065- module_memfree(mod->module_init);
99066+ module_memfree(mod->module_init_rw);
99067+ module_memfree_exec(mod->module_init_rx);
99068 kfree(mod->args);
99069 percpu_modfree(mod);
99070
99071 /* Free lock-classes; relies on the preceding sync_rcu(). */
99072- lockdep_free_key_range(mod->module_core, mod->core_size);
99073+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
99074+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
99075
99076 /* Finally, free the core (containing the module structure) */
99077 unset_module_core_ro_nx(mod);
99078- module_memfree(mod->module_core);
99079+ module_memfree_exec(mod->module_core_rx);
99080+ module_memfree(mod->module_core_rw);
99081
99082 #ifdef CONFIG_MPU
99083 update_protections(current->mm);
99084@@ -1939,9 +1969,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
99085 int ret = 0;
99086 const struct kernel_symbol *ksym;
99087
99088+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99089+ int is_fs_load = 0;
99090+ int register_filesystem_found = 0;
99091+ char *p;
99092+
99093+ p = strstr(mod->args, "grsec_modharden_fs");
99094+ if (p) {
99095+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
99096+ /* copy \0 as well */
99097+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
99098+ is_fs_load = 1;
99099+ }
99100+#endif
99101+
99102 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
99103 const char *name = info->strtab + sym[i].st_name;
99104
99105+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99106+ /* it's a real shame this will never get ripped and copied
99107+ upstream! ;(
99108+ */
99109+ if (is_fs_load && !strcmp(name, "register_filesystem"))
99110+ register_filesystem_found = 1;
99111+#endif
99112+
99113 switch (sym[i].st_shndx) {
99114 case SHN_COMMON:
99115 /* Ignore common symbols */
99116@@ -1966,7 +2018,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
99117 ksym = resolve_symbol_wait(mod, info, name);
99118 /* Ok if resolved. */
99119 if (ksym && !IS_ERR(ksym)) {
99120+ pax_open_kernel();
99121 sym[i].st_value = ksym->value;
99122+ pax_close_kernel();
99123 break;
99124 }
99125
99126@@ -1985,11 +2039,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
99127 secbase = (unsigned long)mod_percpu(mod);
99128 else
99129 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
99130+ pax_open_kernel();
99131 sym[i].st_value += secbase;
99132+ pax_close_kernel();
99133 break;
99134 }
99135 }
99136
99137+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99138+ if (is_fs_load && !register_filesystem_found) {
99139+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
99140+ ret = -EPERM;
99141+ }
99142+#endif
99143+
99144 return ret;
99145 }
99146
99147@@ -2073,22 +2136,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
99148 || s->sh_entsize != ~0UL
99149 || strstarts(sname, ".init"))
99150 continue;
99151- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
99152+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
99153+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
99154+ else
99155+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
99156 pr_debug("\t%s\n", sname);
99157 }
99158- switch (m) {
99159- case 0: /* executable */
99160- mod->core_size = debug_align(mod->core_size);
99161- mod->core_text_size = mod->core_size;
99162- break;
99163- case 1: /* RO: text and ro-data */
99164- mod->core_size = debug_align(mod->core_size);
99165- mod->core_ro_size = mod->core_size;
99166- break;
99167- case 3: /* whole core */
99168- mod->core_size = debug_align(mod->core_size);
99169- break;
99170- }
99171 }
99172
99173 pr_debug("Init section allocation order:\n");
99174@@ -2102,23 +2155,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
99175 || s->sh_entsize != ~0UL
99176 || !strstarts(sname, ".init"))
99177 continue;
99178- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
99179- | INIT_OFFSET_MASK);
99180+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
99181+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
99182+ else
99183+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
99184+ s->sh_entsize |= INIT_OFFSET_MASK;
99185 pr_debug("\t%s\n", sname);
99186 }
99187- switch (m) {
99188- case 0: /* executable */
99189- mod->init_size = debug_align(mod->init_size);
99190- mod->init_text_size = mod->init_size;
99191- break;
99192- case 1: /* RO: text and ro-data */
99193- mod->init_size = debug_align(mod->init_size);
99194- mod->init_ro_size = mod->init_size;
99195- break;
99196- case 3: /* whole init */
99197- mod->init_size = debug_align(mod->init_size);
99198- break;
99199- }
99200 }
99201 }
99202
99203@@ -2291,7 +2334,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
99204
99205 /* Put symbol section at end of init part of module. */
99206 symsect->sh_flags |= SHF_ALLOC;
99207- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
99208+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
99209 info->index.sym) | INIT_OFFSET_MASK;
99210 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
99211
99212@@ -2308,16 +2351,16 @@ static void layout_symtab(struct module *mod, struct load_info *info)
99213 }
99214
99215 /* Append room for core symbols at end of core part. */
99216- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
99217- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
99218- mod->core_size += strtab_size;
99219- mod->core_size = debug_align(mod->core_size);
99220+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
99221+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
99222+ mod->core_size_rx += strtab_size;
99223+ mod->core_size_rx = debug_align(mod->core_size_rx);
99224
99225 /* Put string table section at end of init part of module. */
99226 strsect->sh_flags |= SHF_ALLOC;
99227- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
99228+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
99229 info->index.str) | INIT_OFFSET_MASK;
99230- mod->init_size = debug_align(mod->init_size);
99231+ mod->init_size_rx = debug_align(mod->init_size_rx);
99232 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
99233 }
99234
99235@@ -2334,12 +2377,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
99236 /* Make sure we get permanent strtab: don't use info->strtab. */
99237 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
99238
99239+ pax_open_kernel();
99240+
99241 /* Set types up while we still have access to sections. */
99242 for (i = 0; i < mod->num_symtab; i++)
99243 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
99244
99245- mod->core_symtab = dst = mod->module_core + info->symoffs;
99246- mod->core_strtab = s = mod->module_core + info->stroffs;
99247+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
99248+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
99249 src = mod->symtab;
99250 for (ndst = i = 0; i < mod->num_symtab; i++) {
99251 if (i == 0 ||
99252@@ -2351,6 +2396,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
99253 }
99254 }
99255 mod->core_num_syms = ndst;
99256+
99257+ pax_close_kernel();
99258 }
99259 #else
99260 static inline void layout_symtab(struct module *mod, struct load_info *info)
99261@@ -2384,17 +2431,33 @@ void * __weak module_alloc(unsigned long size)
99262 return vmalloc_exec(size);
99263 }
99264
99265-static void *module_alloc_update_bounds(unsigned long size)
99266+static void *module_alloc_update_bounds_rw(unsigned long size)
99267 {
99268 void *ret = module_alloc(size);
99269
99270 if (ret) {
99271 mutex_lock(&module_mutex);
99272 /* Update module bounds. */
99273- if ((unsigned long)ret < module_addr_min)
99274- module_addr_min = (unsigned long)ret;
99275- if ((unsigned long)ret + size > module_addr_max)
99276- module_addr_max = (unsigned long)ret + size;
99277+ if ((unsigned long)ret < module_addr_min_rw)
99278+ module_addr_min_rw = (unsigned long)ret;
99279+ if ((unsigned long)ret + size > module_addr_max_rw)
99280+ module_addr_max_rw = (unsigned long)ret + size;
99281+ mutex_unlock(&module_mutex);
99282+ }
99283+ return ret;
99284+}
99285+
99286+static void *module_alloc_update_bounds_rx(unsigned long size)
99287+{
99288+ void *ret = module_alloc_exec(size);
99289+
99290+ if (ret) {
99291+ mutex_lock(&module_mutex);
99292+ /* Update module bounds. */
99293+ if ((unsigned long)ret < module_addr_min_rx)
99294+ module_addr_min_rx = (unsigned long)ret;
99295+ if ((unsigned long)ret + size > module_addr_max_rx)
99296+ module_addr_max_rx = (unsigned long)ret + size;
99297 mutex_unlock(&module_mutex);
99298 }
99299 return ret;
99300@@ -2665,7 +2728,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
99301 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
99302
99303 if (info->index.sym == 0) {
99304+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
99305+ /*
99306+ * avoid potentially printing jibberish on attempted load
99307+ * of a module randomized with a different seed
99308+ */
99309+ pr_warn("module has no symbols (stripped?)\n");
99310+#else
99311 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
99312+#endif
99313 return ERR_PTR(-ENOEXEC);
99314 }
99315
99316@@ -2681,8 +2752,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
99317 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
99318 {
99319 const char *modmagic = get_modinfo(info, "vermagic");
99320+ const char *license = get_modinfo(info, "license");
99321 int err;
99322
99323+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
99324+ if (!license || !license_is_gpl_compatible(license))
99325+ return -ENOEXEC;
99326+#endif
99327+
99328 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
99329 modmagic = NULL;
99330
99331@@ -2707,7 +2784,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
99332 }
99333
99334 /* Set up license info based on the info section */
99335- set_license(mod, get_modinfo(info, "license"));
99336+ set_license(mod, license);
99337
99338 return 0;
99339 }
99340@@ -2801,7 +2878,7 @@ static int move_module(struct module *mod, struct load_info *info)
99341 void *ptr;
99342
99343 /* Do the allocs. */
99344- ptr = module_alloc_update_bounds(mod->core_size);
99345+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
99346 /*
99347 * The pointer to this block is stored in the module structure
99348 * which is inside the block. Just mark it as not being a
99349@@ -2811,11 +2888,11 @@ static int move_module(struct module *mod, struct load_info *info)
99350 if (!ptr)
99351 return -ENOMEM;
99352
99353- memset(ptr, 0, mod->core_size);
99354- mod->module_core = ptr;
99355+ memset(ptr, 0, mod->core_size_rw);
99356+ mod->module_core_rw = ptr;
99357
99358- if (mod->init_size) {
99359- ptr = module_alloc_update_bounds(mod->init_size);
99360+ if (mod->init_size_rw) {
99361+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
99362 /*
99363 * The pointer to this block is stored in the module structure
99364 * which is inside the block. This block doesn't need to be
99365@@ -2824,13 +2901,45 @@ static int move_module(struct module *mod, struct load_info *info)
99366 */
99367 kmemleak_ignore(ptr);
99368 if (!ptr) {
99369- module_memfree(mod->module_core);
99370+ module_memfree(mod->module_core_rw);
99371 return -ENOMEM;
99372 }
99373- memset(ptr, 0, mod->init_size);
99374- mod->module_init = ptr;
99375+ memset(ptr, 0, mod->init_size_rw);
99376+ mod->module_init_rw = ptr;
99377 } else
99378- mod->module_init = NULL;
99379+ mod->module_init_rw = NULL;
99380+
99381+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
99382+ kmemleak_not_leak(ptr);
99383+ if (!ptr) {
99384+ if (mod->module_init_rw)
99385+ module_memfree(mod->module_init_rw);
99386+ module_memfree(mod->module_core_rw);
99387+ return -ENOMEM;
99388+ }
99389+
99390+ pax_open_kernel();
99391+ memset(ptr, 0, mod->core_size_rx);
99392+ pax_close_kernel();
99393+ mod->module_core_rx = ptr;
99394+
99395+ if (mod->init_size_rx) {
99396+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
99397+ kmemleak_ignore(ptr);
99398+ if (!ptr && mod->init_size_rx) {
99399+ module_memfree_exec(mod->module_core_rx);
99400+ if (mod->module_init_rw)
99401+ module_memfree(mod->module_init_rw);
99402+ module_memfree(mod->module_core_rw);
99403+ return -ENOMEM;
99404+ }
99405+
99406+ pax_open_kernel();
99407+ memset(ptr, 0, mod->init_size_rx);
99408+ pax_close_kernel();
99409+ mod->module_init_rx = ptr;
99410+ } else
99411+ mod->module_init_rx = NULL;
99412
99413 /* Transfer each section which specifies SHF_ALLOC */
99414 pr_debug("final section addresses:\n");
99415@@ -2841,16 +2950,45 @@ static int move_module(struct module *mod, struct load_info *info)
99416 if (!(shdr->sh_flags & SHF_ALLOC))
99417 continue;
99418
99419- if (shdr->sh_entsize & INIT_OFFSET_MASK)
99420- dest = mod->module_init
99421- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
99422- else
99423- dest = mod->module_core + shdr->sh_entsize;
99424+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
99425+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
99426+ dest = mod->module_init_rw
99427+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
99428+ else
99429+ dest = mod->module_init_rx
99430+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
99431+ } else {
99432+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
99433+ dest = mod->module_core_rw + shdr->sh_entsize;
99434+ else
99435+ dest = mod->module_core_rx + shdr->sh_entsize;
99436+ }
99437+
99438+ if (shdr->sh_type != SHT_NOBITS) {
99439+
99440+#ifdef CONFIG_PAX_KERNEXEC
99441+#ifdef CONFIG_X86_64
99442+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
99443+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
99444+#endif
99445+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
99446+ pax_open_kernel();
99447+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
99448+ pax_close_kernel();
99449+ } else
99450+#endif
99451
99452- if (shdr->sh_type != SHT_NOBITS)
99453 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
99454+ }
99455 /* Update sh_addr to point to copy in image. */
99456- shdr->sh_addr = (unsigned long)dest;
99457+
99458+#ifdef CONFIG_PAX_KERNEXEC
99459+ if (shdr->sh_flags & SHF_EXECINSTR)
99460+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
99461+ else
99462+#endif
99463+
99464+ shdr->sh_addr = (unsigned long)dest;
99465 pr_debug("\t0x%lx %s\n",
99466 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
99467 }
99468@@ -2907,12 +3045,12 @@ static void flush_module_icache(const struct module *mod)
99469 * Do it before processing of module parameters, so the module
99470 * can provide parameter accessor functions of its own.
99471 */
99472- if (mod->module_init)
99473- flush_icache_range((unsigned long)mod->module_init,
99474- (unsigned long)mod->module_init
99475- + mod->init_size);
99476- flush_icache_range((unsigned long)mod->module_core,
99477- (unsigned long)mod->module_core + mod->core_size);
99478+ if (mod->module_init_rx)
99479+ flush_icache_range((unsigned long)mod->module_init_rx,
99480+ (unsigned long)mod->module_init_rx
99481+ + mod->init_size_rx);
99482+ flush_icache_range((unsigned long)mod->module_core_rx,
99483+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
99484
99485 set_fs(old_fs);
99486 }
99487@@ -2970,8 +3108,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
99488 {
99489 percpu_modfree(mod);
99490 module_arch_freeing_init(mod);
99491- module_memfree(mod->module_init);
99492- module_memfree(mod->module_core);
99493+ module_memfree_exec(mod->module_init_rx);
99494+ module_memfree_exec(mod->module_core_rx);
99495+ module_memfree(mod->module_init_rw);
99496+ module_memfree(mod->module_core_rw);
99497 }
99498
99499 int __weak module_finalize(const Elf_Ehdr *hdr,
99500@@ -2984,7 +3124,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
99501 static int post_relocation(struct module *mod, const struct load_info *info)
99502 {
99503 /* Sort exception table now relocations are done. */
99504+ pax_open_kernel();
99505 sort_extable(mod->extable, mod->extable + mod->num_exentries);
99506+ pax_close_kernel();
99507
99508 /* Copy relocated percpu area over. */
99509 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
99510@@ -3032,13 +3174,15 @@ static void do_mod_ctors(struct module *mod)
99511 /* For freeing module_init on success, in case kallsyms traversing */
99512 struct mod_initfree {
99513 struct rcu_head rcu;
99514- void *module_init;
99515+ void *module_init_rw;
99516+ void *module_init_rx;
99517 };
99518
99519 static void do_free_init(struct rcu_head *head)
99520 {
99521 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
99522- module_memfree(m->module_init);
99523+ module_memfree(m->module_init_rw);
99524+ module_memfree_exec(m->module_init_rx);
99525 kfree(m);
99526 }
99527
99528@@ -3058,7 +3202,8 @@ static noinline int do_init_module(struct module *mod)
99529 ret = -ENOMEM;
99530 goto fail;
99531 }
99532- freeinit->module_init = mod->module_init;
99533+ freeinit->module_init_rw = mod->module_init_rw;
99534+ freeinit->module_init_rx = mod->module_init_rx;
99535
99536 /*
99537 * We want to find out whether @mod uses async during init. Clear
99538@@ -3117,10 +3262,10 @@ static noinline int do_init_module(struct module *mod)
99539 #endif
99540 unset_module_init_ro_nx(mod);
99541 module_arch_freeing_init(mod);
99542- mod->module_init = NULL;
99543- mod->init_size = 0;
99544- mod->init_ro_size = 0;
99545- mod->init_text_size = 0;
99546+ mod->module_init_rw = NULL;
99547+ mod->module_init_rx = NULL;
99548+ mod->init_size_rw = 0;
99549+ mod->init_size_rx = 0;
99550 /*
99551 * We want to free module_init, but be aware that kallsyms may be
99552 * walking this with preempt disabled. In all the failure paths,
99553@@ -3208,16 +3353,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
99554 module_bug_finalize(info->hdr, info->sechdrs, mod);
99555
99556 /* Set RO and NX regions for core */
99557- set_section_ro_nx(mod->module_core,
99558- mod->core_text_size,
99559- mod->core_ro_size,
99560- mod->core_size);
99561+ set_section_ro_nx(mod->module_core_rx,
99562+ mod->core_size_rx,
99563+ mod->core_size_rx,
99564+ mod->core_size_rx);
99565
99566 /* Set RO and NX regions for init */
99567- set_section_ro_nx(mod->module_init,
99568- mod->init_text_size,
99569- mod->init_ro_size,
99570- mod->init_size);
99571+ set_section_ro_nx(mod->module_init_rx,
99572+ mod->init_size_rx,
99573+ mod->init_size_rx,
99574+ mod->init_size_rx);
99575
99576 /* Mark state as coming so strong_try_module_get() ignores us,
99577 * but kallsyms etc. can see us. */
99578@@ -3301,9 +3446,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
99579 if (err)
99580 goto free_unload;
99581
99582+ /* Now copy in args */
99583+ mod->args = strndup_user(uargs, ~0UL >> 1);
99584+ if (IS_ERR(mod->args)) {
99585+ err = PTR_ERR(mod->args);
99586+ goto free_unload;
99587+ }
99588+
99589 /* Set up MODINFO_ATTR fields */
99590 setup_modinfo(mod, info);
99591
99592+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99593+ {
99594+ char *p, *p2;
99595+
99596+ if (strstr(mod->args, "grsec_modharden_netdev")) {
99597+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
99598+ err = -EPERM;
99599+ goto free_modinfo;
99600+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
99601+ p += sizeof("grsec_modharden_normal") - 1;
99602+ p2 = strstr(p, "_");
99603+ if (p2) {
99604+ *p2 = '\0';
99605+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
99606+ *p2 = '_';
99607+ }
99608+ err = -EPERM;
99609+ goto free_modinfo;
99610+ }
99611+ }
99612+#endif
99613+
99614 /* Fix up syms, so that st_value is a pointer to location. */
99615 err = simplify_symbols(mod, info);
99616 if (err < 0)
99617@@ -3319,13 +3493,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
99618
99619 flush_module_icache(mod);
99620
99621- /* Now copy in args */
99622- mod->args = strndup_user(uargs, ~0UL >> 1);
99623- if (IS_ERR(mod->args)) {
99624- err = PTR_ERR(mod->args);
99625- goto free_arch_cleanup;
99626- }
99627-
99628 dynamic_debug_setup(info->debug, info->num_debug);
99629
99630 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
99631@@ -3373,11 +3540,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
99632 ddebug_cleanup:
99633 dynamic_debug_remove(info->debug);
99634 synchronize_sched();
99635- kfree(mod->args);
99636- free_arch_cleanup:
99637 module_arch_cleanup(mod);
99638 free_modinfo:
99639 free_modinfo(mod);
99640+ kfree(mod->args);
99641 free_unload:
99642 module_unload_free(mod);
99643 unlink_mod:
99644@@ -3390,7 +3556,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
99645 mutex_unlock(&module_mutex);
99646 free_module:
99647 /* Free lock-classes; relies on the preceding sync_rcu() */
99648- lockdep_free_key_range(mod->module_core, mod->core_size);
99649+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
99650+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
99651
99652 module_deallocate(mod, info);
99653 free_copy:
99654@@ -3467,10 +3634,16 @@ static const char *get_ksymbol(struct module *mod,
99655 unsigned long nextval;
99656
99657 /* At worse, next value is at end of module */
99658- if (within_module_init(addr, mod))
99659- nextval = (unsigned long)mod->module_init+mod->init_text_size;
99660+ if (within_module_init_rx(addr, mod))
99661+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
99662+ else if (within_module_init_rw(addr, mod))
99663+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
99664+ else if (within_module_core_rx(addr, mod))
99665+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
99666+ else if (within_module_core_rw(addr, mod))
99667+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
99668 else
99669- nextval = (unsigned long)mod->module_core+mod->core_text_size;
99670+ return NULL;
99671
99672 /* Scan for closest preceding symbol, and next symbol. (ELF
99673 starts real symbols at 1). */
99674@@ -3718,7 +3891,7 @@ static int m_show(struct seq_file *m, void *p)
99675 return 0;
99676
99677 seq_printf(m, "%s %u",
99678- mod->name, mod->init_size + mod->core_size);
99679+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
99680 print_unload_info(m, mod);
99681
99682 /* Informative for users. */
99683@@ -3727,7 +3900,7 @@ static int m_show(struct seq_file *m, void *p)
99684 mod->state == MODULE_STATE_COMING ? "Loading" :
99685 "Live");
99686 /* Used by oprofile and other similar tools. */
99687- seq_printf(m, " 0x%pK", mod->module_core);
99688+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
99689
99690 /* Taints info */
99691 if (mod->taints)
99692@@ -3763,7 +3936,17 @@ static const struct file_operations proc_modules_operations = {
99693
99694 static int __init proc_modules_init(void)
99695 {
99696+#ifndef CONFIG_GRKERNSEC_HIDESYM
99697+#ifdef CONFIG_GRKERNSEC_PROC_USER
99698+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
99699+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
99700+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
99701+#else
99702 proc_create("modules", 0, NULL, &proc_modules_operations);
99703+#endif
99704+#else
99705+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
99706+#endif
99707 return 0;
99708 }
99709 module_init(proc_modules_init);
99710@@ -3824,7 +4007,8 @@ struct module *__module_address(unsigned long addr)
99711 {
99712 struct module *mod;
99713
99714- if (addr < module_addr_min || addr > module_addr_max)
99715+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
99716+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
99717 return NULL;
99718
99719 list_for_each_entry_rcu(mod, &modules, list) {
99720@@ -3865,11 +4049,20 @@ bool is_module_text_address(unsigned long addr)
99721 */
99722 struct module *__module_text_address(unsigned long addr)
99723 {
99724- struct module *mod = __module_address(addr);
99725+ struct module *mod;
99726+
99727+#ifdef CONFIG_X86_32
99728+ addr = ktla_ktva(addr);
99729+#endif
99730+
99731+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
99732+ return NULL;
99733+
99734+ mod = __module_address(addr);
99735+
99736 if (mod) {
99737 /* Make sure it's within the text section. */
99738- if (!within(addr, mod->module_init, mod->init_text_size)
99739- && !within(addr, mod->module_core, mod->core_text_size))
99740+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
99741 mod = NULL;
99742 }
99743 return mod;
99744diff --git a/kernel/notifier.c b/kernel/notifier.c
99745index ae9fc7c..5085fbf 100644
99746--- a/kernel/notifier.c
99747+++ b/kernel/notifier.c
99748@@ -5,6 +5,7 @@
99749 #include <linux/rcupdate.h>
99750 #include <linux/vmalloc.h>
99751 #include <linux/reboot.h>
99752+#include <linux/mm.h>
99753
99754 /*
99755 * Notifier list for kernel code which wants to be called
99756@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
99757 while ((*nl) != NULL) {
99758 if (n->priority > (*nl)->priority)
99759 break;
99760- nl = &((*nl)->next);
99761+ nl = (struct notifier_block **)&((*nl)->next);
99762 }
99763- n->next = *nl;
99764+ pax_open_kernel();
99765+ *(const void **)&n->next = *nl;
99766 rcu_assign_pointer(*nl, n);
99767+ pax_close_kernel();
99768 return 0;
99769 }
99770
99771@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
99772 return 0;
99773 if (n->priority > (*nl)->priority)
99774 break;
99775- nl = &((*nl)->next);
99776+ nl = (struct notifier_block **)&((*nl)->next);
99777 }
99778- n->next = *nl;
99779+ pax_open_kernel();
99780+ *(const void **)&n->next = *nl;
99781 rcu_assign_pointer(*nl, n);
99782+ pax_close_kernel();
99783 return 0;
99784 }
99785
99786@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
99787 {
99788 while ((*nl) != NULL) {
99789 if ((*nl) == n) {
99790+ pax_open_kernel();
99791 rcu_assign_pointer(*nl, n->next);
99792+ pax_close_kernel();
99793 return 0;
99794 }
99795- nl = &((*nl)->next);
99796+ nl = (struct notifier_block **)&((*nl)->next);
99797 }
99798 return -ENOENT;
99799 }
99800diff --git a/kernel/padata.c b/kernel/padata.c
99801index b38bea9..91acfbe 100644
99802--- a/kernel/padata.c
99803+++ b/kernel/padata.c
99804@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
99805 * seq_nr mod. number of cpus in use.
99806 */
99807
99808- seq_nr = atomic_inc_return(&pd->seq_nr);
99809+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
99810 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
99811
99812 return padata_index_to_cpu(pd, cpu_index);
99813@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
99814 padata_init_pqueues(pd);
99815 padata_init_squeues(pd);
99816 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
99817- atomic_set(&pd->seq_nr, -1);
99818+ atomic_set_unchecked(&pd->seq_nr, -1);
99819 atomic_set(&pd->reorder_objects, 0);
99820 atomic_set(&pd->refcnt, 0);
99821 pd->pinst = pinst;
99822diff --git a/kernel/panic.c b/kernel/panic.c
99823index 8136ad7..15c857b 100644
99824--- a/kernel/panic.c
99825+++ b/kernel/panic.c
99826@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
99827 /*
99828 * Stop ourself in panic -- architecture code may override this
99829 */
99830-void __weak panic_smp_self_stop(void)
99831+void __weak __noreturn panic_smp_self_stop(void)
99832 {
99833 while (1)
99834 cpu_relax();
99835@@ -425,7 +425,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
99836 disable_trace_on_warning();
99837
99838 pr_warn("------------[ cut here ]------------\n");
99839- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
99840+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
99841 raw_smp_processor_id(), current->pid, file, line, caller);
99842
99843 if (args)
99844@@ -490,7 +490,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
99845 */
99846 __visible void __stack_chk_fail(void)
99847 {
99848- panic("stack-protector: Kernel stack is corrupted in: %p\n",
99849+ dump_stack();
99850+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
99851 __builtin_return_address(0));
99852 }
99853 EXPORT_SYMBOL(__stack_chk_fail);
99854diff --git a/kernel/pid.c b/kernel/pid.c
99855index cd36a5e..11f185d 100644
99856--- a/kernel/pid.c
99857+++ b/kernel/pid.c
99858@@ -33,6 +33,7 @@
99859 #include <linux/rculist.h>
99860 #include <linux/bootmem.h>
99861 #include <linux/hash.h>
99862+#include <linux/security.h>
99863 #include <linux/pid_namespace.h>
99864 #include <linux/init_task.h>
99865 #include <linux/syscalls.h>
99866@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
99867
99868 int pid_max = PID_MAX_DEFAULT;
99869
99870-#define RESERVED_PIDS 300
99871+#define RESERVED_PIDS 500
99872
99873 int pid_max_min = RESERVED_PIDS + 1;
99874 int pid_max_max = PID_MAX_LIMIT;
99875@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
99876 */
99877 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
99878 {
99879+ struct task_struct *task;
99880+
99881 rcu_lockdep_assert(rcu_read_lock_held(),
99882 "find_task_by_pid_ns() needs rcu_read_lock()"
99883 " protection");
99884- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
99885+
99886+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
99887+
99888+ if (gr_pid_is_chrooted(task))
99889+ return NULL;
99890+
99891+ return task;
99892 }
99893
99894 struct task_struct *find_task_by_vpid(pid_t vnr)
99895@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
99896 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
99897 }
99898
99899+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
99900+{
99901+ rcu_lockdep_assert(rcu_read_lock_held(),
99902+ "find_task_by_pid_ns() needs rcu_read_lock()"
99903+ " protection");
99904+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
99905+}
99906+
99907 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
99908 {
99909 struct pid *pid;
99910diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
99911index a65ba13..f600dbb 100644
99912--- a/kernel/pid_namespace.c
99913+++ b/kernel/pid_namespace.c
99914@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
99915 void __user *buffer, size_t *lenp, loff_t *ppos)
99916 {
99917 struct pid_namespace *pid_ns = task_active_pid_ns(current);
99918- struct ctl_table tmp = *table;
99919+ ctl_table_no_const tmp = *table;
99920
99921 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
99922 return -EPERM;
99923diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
99924index 7e01f78..f5da19d 100644
99925--- a/kernel/power/Kconfig
99926+++ b/kernel/power/Kconfig
99927@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
99928 config HIBERNATION
99929 bool "Hibernation (aka 'suspend to disk')"
99930 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
99931+ depends on !GRKERNSEC_KMEM
99932+ depends on !PAX_MEMORY_SANITIZE
99933 select HIBERNATE_CALLBACKS
99934 select LZO_COMPRESS
99935 select LZO_DECOMPRESS
99936diff --git a/kernel/power/process.c b/kernel/power/process.c
99937index 564f786..361a18e 100644
99938--- a/kernel/power/process.c
99939+++ b/kernel/power/process.c
99940@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
99941 unsigned int elapsed_msecs;
99942 bool wakeup = false;
99943 int sleep_usecs = USEC_PER_MSEC;
99944+ bool timedout = false;
99945
99946 do_gettimeofday(&start);
99947
99948@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
99949
99950 while (true) {
99951 todo = 0;
99952+ if (time_after(jiffies, end_time))
99953+ timedout = true;
99954 read_lock(&tasklist_lock);
99955 for_each_process_thread(g, p) {
99956 if (p == current || !freeze_task(p))
99957 continue;
99958
99959- if (!freezer_should_skip(p))
99960+ if (!freezer_should_skip(p)) {
99961 todo++;
99962+ if (timedout) {
99963+ printk(KERN_ERR "Task refusing to freeze:\n");
99964+ sched_show_task(p);
99965+ }
99966+ }
99967 }
99968 read_unlock(&tasklist_lock);
99969
99970@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
99971 todo += wq_busy;
99972 }
99973
99974- if (!todo || time_after(jiffies, end_time))
99975+ if (!todo || timedout)
99976 break;
99977
99978 if (pm_wakeup_pending()) {
99979diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
99980index bb0635b..9aff9f3 100644
99981--- a/kernel/printk/printk.c
99982+++ b/kernel/printk/printk.c
99983@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
99984 if (from_file && type != SYSLOG_ACTION_OPEN)
99985 return 0;
99986
99987+#ifdef CONFIG_GRKERNSEC_DMESG
99988+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
99989+ return -EPERM;
99990+#endif
99991+
99992 if (syslog_action_restricted(type)) {
99993 if (capable(CAP_SYSLOG))
99994 return 0;
99995diff --git a/kernel/profile.c b/kernel/profile.c
99996index a7bcd28..5b368fa 100644
99997--- a/kernel/profile.c
99998+++ b/kernel/profile.c
99999@@ -37,7 +37,7 @@ struct profile_hit {
100000 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
100001 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
100002
100003-static atomic_t *prof_buffer;
100004+static atomic_unchecked_t *prof_buffer;
100005 static unsigned long prof_len, prof_shift;
100006
100007 int prof_on __read_mostly;
100008@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
100009 hits[i].pc = 0;
100010 continue;
100011 }
100012- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
100013+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
100014 hits[i].hits = hits[i].pc = 0;
100015 }
100016 }
100017@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
100018 * Add the current hit(s) and flush the write-queue out
100019 * to the global buffer:
100020 */
100021- atomic_add(nr_hits, &prof_buffer[pc]);
100022+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
100023 for (i = 0; i < NR_PROFILE_HIT; ++i) {
100024- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
100025+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
100026 hits[i].pc = hits[i].hits = 0;
100027 }
100028 out:
100029@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
100030 {
100031 unsigned long pc;
100032 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
100033- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
100034+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
100035 }
100036 #endif /* !CONFIG_SMP */
100037
100038@@ -489,7 +489,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
100039 return -EFAULT;
100040 buf++; p++; count--; read++;
100041 }
100042- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
100043+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
100044 if (copy_to_user(buf, (void *)pnt, count))
100045 return -EFAULT;
100046 read += count;
100047@@ -520,7 +520,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
100048 }
100049 #endif
100050 profile_discard_flip_buffers();
100051- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
100052+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
100053 return count;
100054 }
100055
100056diff --git a/kernel/ptrace.c b/kernel/ptrace.c
100057index 9a34bd8..38d90e5 100644
100058--- a/kernel/ptrace.c
100059+++ b/kernel/ptrace.c
100060@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
100061 if (seize)
100062 flags |= PT_SEIZED;
100063 rcu_read_lock();
100064- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
100065+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
100066 flags |= PT_PTRACE_CAP;
100067 rcu_read_unlock();
100068 task->ptrace = flags;
100069@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
100070 break;
100071 return -EIO;
100072 }
100073- if (copy_to_user(dst, buf, retval))
100074+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
100075 return -EFAULT;
100076 copied += retval;
100077 src += retval;
100078@@ -803,7 +803,7 @@ int ptrace_request(struct task_struct *child, long request,
100079 bool seized = child->ptrace & PT_SEIZED;
100080 int ret = -EIO;
100081 siginfo_t siginfo, *si;
100082- void __user *datavp = (void __user *) data;
100083+ void __user *datavp = (__force void __user *) data;
100084 unsigned long __user *datalp = datavp;
100085 unsigned long flags;
100086
100087@@ -1049,14 +1049,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
100088 goto out;
100089 }
100090
100091+ if (gr_handle_ptrace(child, request)) {
100092+ ret = -EPERM;
100093+ goto out_put_task_struct;
100094+ }
100095+
100096 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
100097 ret = ptrace_attach(child, request, addr, data);
100098 /*
100099 * Some architectures need to do book-keeping after
100100 * a ptrace attach.
100101 */
100102- if (!ret)
100103+ if (!ret) {
100104 arch_ptrace_attach(child);
100105+ gr_audit_ptrace(child);
100106+ }
100107 goto out_put_task_struct;
100108 }
100109
100110@@ -1084,7 +1091,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
100111 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
100112 if (copied != sizeof(tmp))
100113 return -EIO;
100114- return put_user(tmp, (unsigned long __user *)data);
100115+ return put_user(tmp, (__force unsigned long __user *)data);
100116 }
100117
100118 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
100119@@ -1177,7 +1184,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
100120 }
100121
100122 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
100123- compat_long_t, addr, compat_long_t, data)
100124+ compat_ulong_t, addr, compat_ulong_t, data)
100125 {
100126 struct task_struct *child;
100127 long ret;
100128@@ -1193,14 +1200,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
100129 goto out;
100130 }
100131
100132+ if (gr_handle_ptrace(child, request)) {
100133+ ret = -EPERM;
100134+ goto out_put_task_struct;
100135+ }
100136+
100137 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
100138 ret = ptrace_attach(child, request, addr, data);
100139 /*
100140 * Some architectures need to do book-keeping after
100141 * a ptrace attach.
100142 */
100143- if (!ret)
100144+ if (!ret) {
100145 arch_ptrace_attach(child);
100146+ gr_audit_ptrace(child);
100147+ }
100148 goto out_put_task_struct;
100149 }
100150
100151diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
100152index 30d42aa..cac5d66 100644
100153--- a/kernel/rcu/rcutorture.c
100154+++ b/kernel/rcu/rcutorture.c
100155@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
100156 rcu_torture_count) = { 0 };
100157 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
100158 rcu_torture_batch) = { 0 };
100159-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
100160-static atomic_t n_rcu_torture_alloc;
100161-static atomic_t n_rcu_torture_alloc_fail;
100162-static atomic_t n_rcu_torture_free;
100163-static atomic_t n_rcu_torture_mberror;
100164-static atomic_t n_rcu_torture_error;
100165+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
100166+static atomic_unchecked_t n_rcu_torture_alloc;
100167+static atomic_unchecked_t n_rcu_torture_alloc_fail;
100168+static atomic_unchecked_t n_rcu_torture_free;
100169+static atomic_unchecked_t n_rcu_torture_mberror;
100170+static atomic_unchecked_t n_rcu_torture_error;
100171 static long n_rcu_torture_barrier_error;
100172 static long n_rcu_torture_boost_ktrerror;
100173 static long n_rcu_torture_boost_rterror;
100174@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
100175 static long n_rcu_torture_timers;
100176 static long n_barrier_attempts;
100177 static long n_barrier_successes;
100178-static atomic_long_t n_cbfloods;
100179+static atomic_long_unchecked_t n_cbfloods;
100180 static struct list_head rcu_torture_removed;
100181
100182 static int rcu_torture_writer_state;
100183@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
100184
100185 spin_lock_bh(&rcu_torture_lock);
100186 if (list_empty(&rcu_torture_freelist)) {
100187- atomic_inc(&n_rcu_torture_alloc_fail);
100188+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
100189 spin_unlock_bh(&rcu_torture_lock);
100190 return NULL;
100191 }
100192- atomic_inc(&n_rcu_torture_alloc);
100193+ atomic_inc_unchecked(&n_rcu_torture_alloc);
100194 p = rcu_torture_freelist.next;
100195 list_del_init(p);
100196 spin_unlock_bh(&rcu_torture_lock);
100197@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
100198 static void
100199 rcu_torture_free(struct rcu_torture *p)
100200 {
100201- atomic_inc(&n_rcu_torture_free);
100202+ atomic_inc_unchecked(&n_rcu_torture_free);
100203 spin_lock_bh(&rcu_torture_lock);
100204 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
100205 spin_unlock_bh(&rcu_torture_lock);
100206@@ -308,7 +308,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
100207 i = rp->rtort_pipe_count;
100208 if (i > RCU_TORTURE_PIPE_LEN)
100209 i = RCU_TORTURE_PIPE_LEN;
100210- atomic_inc(&rcu_torture_wcount[i]);
100211+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
100212 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
100213 rp->rtort_mbtest = 0;
100214 return true;
100215@@ -796,7 +796,7 @@ rcu_torture_cbflood(void *arg)
100216 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
100217 do {
100218 schedule_timeout_interruptible(cbflood_inter_holdoff);
100219- atomic_long_inc(&n_cbfloods);
100220+ atomic_long_inc_unchecked(&n_cbfloods);
100221 WARN_ON(signal_pending(current));
100222 for (i = 0; i < cbflood_n_burst; i++) {
100223 for (j = 0; j < cbflood_n_per_burst; j++) {
100224@@ -915,7 +915,7 @@ rcu_torture_writer(void *arg)
100225 i = old_rp->rtort_pipe_count;
100226 if (i > RCU_TORTURE_PIPE_LEN)
100227 i = RCU_TORTURE_PIPE_LEN;
100228- atomic_inc(&rcu_torture_wcount[i]);
100229+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
100230 old_rp->rtort_pipe_count++;
100231 switch (synctype[torture_random(&rand) % nsynctypes]) {
100232 case RTWS_DEF_FREE:
100233@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
100234 return;
100235 }
100236 if (p->rtort_mbtest == 0)
100237- atomic_inc(&n_rcu_torture_mberror);
100238+ atomic_inc_unchecked(&n_rcu_torture_mberror);
100239 spin_lock(&rand_lock);
100240 cur_ops->read_delay(&rand);
100241 n_rcu_torture_timers++;
100242@@ -1111,7 +1111,7 @@ rcu_torture_reader(void *arg)
100243 continue;
100244 }
100245 if (p->rtort_mbtest == 0)
100246- atomic_inc(&n_rcu_torture_mberror);
100247+ atomic_inc_unchecked(&n_rcu_torture_mberror);
100248 cur_ops->read_delay(&rand);
100249 preempt_disable();
100250 pipe_count = p->rtort_pipe_count;
100251@@ -1180,11 +1180,11 @@ rcu_torture_stats_print(void)
100252 rcu_torture_current,
100253 rcu_torture_current_version,
100254 list_empty(&rcu_torture_freelist),
100255- atomic_read(&n_rcu_torture_alloc),
100256- atomic_read(&n_rcu_torture_alloc_fail),
100257- atomic_read(&n_rcu_torture_free));
100258+ atomic_read_unchecked(&n_rcu_torture_alloc),
100259+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
100260+ atomic_read_unchecked(&n_rcu_torture_free));
100261 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
100262- atomic_read(&n_rcu_torture_mberror),
100263+ atomic_read_unchecked(&n_rcu_torture_mberror),
100264 n_rcu_torture_boost_ktrerror,
100265 n_rcu_torture_boost_rterror);
100266 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
100267@@ -1196,17 +1196,17 @@ rcu_torture_stats_print(void)
100268 n_barrier_successes,
100269 n_barrier_attempts,
100270 n_rcu_torture_barrier_error);
100271- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
100272+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
100273
100274 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
100275- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
100276+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
100277 n_rcu_torture_barrier_error != 0 ||
100278 n_rcu_torture_boost_ktrerror != 0 ||
100279 n_rcu_torture_boost_rterror != 0 ||
100280 n_rcu_torture_boost_failure != 0 ||
100281 i > 1) {
100282 pr_cont("%s", "!!! ");
100283- atomic_inc(&n_rcu_torture_error);
100284+ atomic_inc_unchecked(&n_rcu_torture_error);
100285 WARN_ON_ONCE(1);
100286 }
100287 pr_cont("Reader Pipe: ");
100288@@ -1223,7 +1223,7 @@ rcu_torture_stats_print(void)
100289 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
100290 pr_cont("Free-Block Circulation: ");
100291 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
100292- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
100293+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
100294 }
100295 pr_cont("\n");
100296
100297@@ -1570,7 +1570,7 @@ rcu_torture_cleanup(void)
100298
100299 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
100300
100301- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
100302+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
100303 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
100304 else if (torture_onoff_failures())
100305 rcu_torture_print_module_parms(cur_ops,
100306@@ -1695,18 +1695,18 @@ rcu_torture_init(void)
100307
100308 rcu_torture_current = NULL;
100309 rcu_torture_current_version = 0;
100310- atomic_set(&n_rcu_torture_alloc, 0);
100311- atomic_set(&n_rcu_torture_alloc_fail, 0);
100312- atomic_set(&n_rcu_torture_free, 0);
100313- atomic_set(&n_rcu_torture_mberror, 0);
100314- atomic_set(&n_rcu_torture_error, 0);
100315+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
100316+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
100317+ atomic_set_unchecked(&n_rcu_torture_free, 0);
100318+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
100319+ atomic_set_unchecked(&n_rcu_torture_error, 0);
100320 n_rcu_torture_barrier_error = 0;
100321 n_rcu_torture_boost_ktrerror = 0;
100322 n_rcu_torture_boost_rterror = 0;
100323 n_rcu_torture_boost_failure = 0;
100324 n_rcu_torture_boosts = 0;
100325 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
100326- atomic_set(&rcu_torture_wcount[i], 0);
100327+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
100328 for_each_possible_cpu(cpu) {
100329 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
100330 per_cpu(rcu_torture_count, cpu)[i] = 0;
100331diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
100332index cc9ceca..ce075a6 100644
100333--- a/kernel/rcu/tiny.c
100334+++ b/kernel/rcu/tiny.c
100335@@ -42,7 +42,7 @@
100336 /* Forward declarations for tiny_plugin.h. */
100337 struct rcu_ctrlblk;
100338 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
100339-static void rcu_process_callbacks(struct softirq_action *unused);
100340+static void rcu_process_callbacks(void);
100341 static void __call_rcu(struct rcu_head *head,
100342 void (*func)(struct rcu_head *rcu),
100343 struct rcu_ctrlblk *rcp);
100344@@ -210,7 +210,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
100345 false));
100346 }
100347
100348-static void rcu_process_callbacks(struct softirq_action *unused)
100349+static __latent_entropy void rcu_process_callbacks(void)
100350 {
100351 __rcu_process_callbacks(&rcu_sched_ctrlblk);
100352 __rcu_process_callbacks(&rcu_bh_ctrlblk);
100353diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
100354index f94e209..d2985bd 100644
100355--- a/kernel/rcu/tiny_plugin.h
100356+++ b/kernel/rcu/tiny_plugin.h
100357@@ -150,10 +150,10 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
100358 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
100359 jiffies - rcp->gp_start, rcp->qlen);
100360 dump_stack();
100361- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
100362+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
100363 3 * rcu_jiffies_till_stall_check() + 3;
100364 } else if (ULONG_CMP_GE(j, js)) {
100365- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100366+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100367 }
100368 }
100369
100370@@ -161,7 +161,7 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
100371 {
100372 rcp->ticks_this_gp = 0;
100373 rcp->gp_start = jiffies;
100374- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100375+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100376 }
100377
100378 static void check_cpu_stalls(void)
100379diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
100380index 48d640c..9401d30 100644
100381--- a/kernel/rcu/tree.c
100382+++ b/kernel/rcu/tree.c
100383@@ -268,7 +268,7 @@ static void rcu_momentary_dyntick_idle(void)
100384 */
100385 rdtp = this_cpu_ptr(&rcu_dynticks);
100386 smp_mb__before_atomic(); /* Earlier stuff before QS. */
100387- atomic_add(2, &rdtp->dynticks); /* QS. */
100388+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
100389 smp_mb__after_atomic(); /* Later stuff after QS. */
100390 break;
100391 }
100392@@ -580,9 +580,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
100393 rcu_prepare_for_idle();
100394 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
100395 smp_mb__before_atomic(); /* See above. */
100396- atomic_inc(&rdtp->dynticks);
100397+ atomic_inc_unchecked(&rdtp->dynticks);
100398 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
100399- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
100400+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
100401 rcu_dynticks_task_enter();
100402
100403 /*
100404@@ -703,10 +703,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
100405
100406 rcu_dynticks_task_exit();
100407 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
100408- atomic_inc(&rdtp->dynticks);
100409+ atomic_inc_unchecked(&rdtp->dynticks);
100410 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
100411 smp_mb__after_atomic(); /* See above. */
100412- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
100413+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
100414 rcu_cleanup_after_idle();
100415 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
100416 if (!user && !is_idle_task(current)) {
100417@@ -840,12 +840,12 @@ void rcu_nmi_enter(void)
100418 * to be in the outermost NMI handler that interrupted an RCU-idle
100419 * period (observation due to Andy Lutomirski).
100420 */
100421- if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
100422+ if (!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)) {
100423 smp_mb__before_atomic(); /* Force delay from prior write. */
100424- atomic_inc(&rdtp->dynticks);
100425+ atomic_inc_unchecked(&rdtp->dynticks);
100426 /* atomic_inc() before later RCU read-side crit sects */
100427 smp_mb__after_atomic(); /* See above. */
100428- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
100429+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
100430 incby = 1;
100431 }
100432 rdtp->dynticks_nmi_nesting += incby;
100433@@ -870,7 +870,7 @@ void rcu_nmi_exit(void)
100434 * to us!)
100435 */
100436 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
100437- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
100438+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
100439
100440 /*
100441 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
100442@@ -885,9 +885,9 @@ void rcu_nmi_exit(void)
100443 rdtp->dynticks_nmi_nesting = 0;
100444 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
100445 smp_mb__before_atomic(); /* See above. */
100446- atomic_inc(&rdtp->dynticks);
100447+ atomic_inc_unchecked(&rdtp->dynticks);
100448 smp_mb__after_atomic(); /* Force delay to next write. */
100449- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
100450+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
100451 }
100452
100453 /**
100454@@ -900,7 +900,7 @@ void rcu_nmi_exit(void)
100455 */
100456 bool notrace __rcu_is_watching(void)
100457 {
100458- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
100459+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
100460 }
100461
100462 /**
100463@@ -983,7 +983,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
100464 static int dyntick_save_progress_counter(struct rcu_data *rdp,
100465 bool *isidle, unsigned long *maxj)
100466 {
100467- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
100468+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
100469 rcu_sysidle_check_cpu(rdp, isidle, maxj);
100470 if ((rdp->dynticks_snap & 0x1) == 0) {
100471 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
100472@@ -991,7 +991,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
100473 } else {
100474 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
100475 rdp->mynode->gpnum))
100476- ACCESS_ONCE(rdp->gpwrap) = true;
100477+ ACCESS_ONCE_RW(rdp->gpwrap) = true;
100478 return 0;
100479 }
100480 }
100481@@ -1009,7 +1009,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
100482 int *rcrmp;
100483 unsigned int snap;
100484
100485- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
100486+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
100487 snap = (unsigned int)rdp->dynticks_snap;
100488
100489 /*
100490@@ -1072,10 +1072,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
100491 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
100492 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
100493 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
100494- ACCESS_ONCE(rdp->cond_resched_completed) =
100495+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
100496 ACCESS_ONCE(rdp->mynode->completed);
100497 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
100498- ACCESS_ONCE(*rcrmp) =
100499+ ACCESS_ONCE_RW(*rcrmp) =
100500 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
100501 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
100502 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
100503@@ -1097,7 +1097,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
100504 rsp->gp_start = j;
100505 smp_wmb(); /* Record start time before stall time. */
100506 j1 = rcu_jiffies_till_stall_check();
100507- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
100508+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
100509 rsp->jiffies_resched = j + j1 / 2;
100510 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
100511 }
100512@@ -1156,7 +1156,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
100513 raw_spin_unlock_irqrestore(&rnp->lock, flags);
100514 return;
100515 }
100516- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
100517+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
100518 raw_spin_unlock_irqrestore(&rnp->lock, flags);
100519
100520 /*
100521@@ -1240,7 +1240,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
100522
100523 raw_spin_lock_irqsave(&rnp->lock, flags);
100524 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
100525- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
100526+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
100527 3 * rcu_jiffies_till_stall_check() + 3;
100528 raw_spin_unlock_irqrestore(&rnp->lock, flags);
100529
100530@@ -1324,7 +1324,7 @@ void rcu_cpu_stall_reset(void)
100531 struct rcu_state *rsp;
100532
100533 for_each_rcu_flavor(rsp)
100534- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
100535+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
100536 }
100537
100538 /*
100539@@ -1671,7 +1671,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
100540 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
100541 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
100542 zero_cpu_stall_ticks(rdp);
100543- ACCESS_ONCE(rdp->gpwrap) = false;
100544+ ACCESS_ONCE_RW(rdp->gpwrap) = false;
100545 }
100546 return ret;
100547 }
100548@@ -1706,7 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
100549 struct rcu_data *rdp;
100550 struct rcu_node *rnp = rcu_get_root(rsp);
100551
100552- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100553+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100554 rcu_bind_gp_kthread();
100555 raw_spin_lock_irq(&rnp->lock);
100556 smp_mb__after_unlock_lock();
100557@@ -1715,7 +1715,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
100558 raw_spin_unlock_irq(&rnp->lock);
100559 return 0;
100560 }
100561- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
100562+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
100563
100564 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
100565 /*
100566@@ -1756,9 +1756,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
100567 rdp = this_cpu_ptr(rsp->rda);
100568 rcu_preempt_check_blocked_tasks(rnp);
100569 rnp->qsmask = rnp->qsmaskinit;
100570- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
100571+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
100572 WARN_ON_ONCE(rnp->completed != rsp->completed);
100573- ACCESS_ONCE(rnp->completed) = rsp->completed;
100574+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
100575 if (rnp == rdp->mynode)
100576 (void)__note_gp_changes(rsp, rnp, rdp);
100577 rcu_preempt_boost_start_gp(rnp);
100578@@ -1767,7 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
100579 rnp->grphi, rnp->qsmask);
100580 raw_spin_unlock_irq(&rnp->lock);
100581 cond_resched_rcu_qs();
100582- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100583+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100584 }
100585
100586 mutex_unlock(&rsp->onoff_mutex);
100587@@ -1784,7 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
100588 unsigned long maxj;
100589 struct rcu_node *rnp = rcu_get_root(rsp);
100590
100591- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100592+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100593 rsp->n_force_qs++;
100594 if (fqs_state == RCU_SAVE_DYNTICK) {
100595 /* Collect dyntick-idle snapshots. */
100596@@ -1805,7 +1805,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
100597 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
100598 raw_spin_lock_irq(&rnp->lock);
100599 smp_mb__after_unlock_lock();
100600- ACCESS_ONCE(rsp->gp_flags) =
100601+ ACCESS_ONCE_RW(rsp->gp_flags) =
100602 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
100603 raw_spin_unlock_irq(&rnp->lock);
100604 }
100605@@ -1823,7 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100606 struct rcu_data *rdp;
100607 struct rcu_node *rnp = rcu_get_root(rsp);
100608
100609- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100610+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100611 raw_spin_lock_irq(&rnp->lock);
100612 smp_mb__after_unlock_lock();
100613 gp_duration = jiffies - rsp->gp_start;
100614@@ -1852,7 +1852,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100615 rcu_for_each_node_breadth_first(rsp, rnp) {
100616 raw_spin_lock_irq(&rnp->lock);
100617 smp_mb__after_unlock_lock();
100618- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
100619+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
100620 rdp = this_cpu_ptr(rsp->rda);
100621 if (rnp == rdp->mynode)
100622 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
100623@@ -1860,7 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100624 nocb += rcu_future_gp_cleanup(rsp, rnp);
100625 raw_spin_unlock_irq(&rnp->lock);
100626 cond_resched_rcu_qs();
100627- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100628+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100629 }
100630 rnp = rcu_get_root(rsp);
100631 raw_spin_lock_irq(&rnp->lock);
100632@@ -1868,14 +1868,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100633 rcu_nocb_gp_set(rnp, nocb);
100634
100635 /* Declare grace period done. */
100636- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
100637+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
100638 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
100639 rsp->fqs_state = RCU_GP_IDLE;
100640 rdp = this_cpu_ptr(rsp->rda);
100641 /* Advance CBs to reduce false positives below. */
100642 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
100643 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
100644- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100645+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100646 trace_rcu_grace_period(rsp->name,
100647 ACCESS_ONCE(rsp->gpnum),
100648 TPS("newreq"));
100649@@ -1910,7 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
100650 if (rcu_gp_init(rsp))
100651 break;
100652 cond_resched_rcu_qs();
100653- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100654+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100655 WARN_ON(signal_pending(current));
100656 trace_rcu_grace_period(rsp->name,
100657 ACCESS_ONCE(rsp->gpnum),
100658@@ -1954,11 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
100659 ACCESS_ONCE(rsp->gpnum),
100660 TPS("fqsend"));
100661 cond_resched_rcu_qs();
100662- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100663+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100664 } else {
100665 /* Deal with stray signal. */
100666 cond_resched_rcu_qs();
100667- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100668+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100669 WARN_ON(signal_pending(current));
100670 trace_rcu_grace_period(rsp->name,
100671 ACCESS_ONCE(rsp->gpnum),
100672@@ -2003,7 +2003,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
100673 */
100674 return false;
100675 }
100676- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100677+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100678 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
100679 TPS("newreq"));
100680
100681@@ -2228,7 +2228,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
100682 rsp->qlen += rdp->qlen;
100683 rdp->n_cbs_orphaned += rdp->qlen;
100684 rdp->qlen_lazy = 0;
100685- ACCESS_ONCE(rdp->qlen) = 0;
100686+ ACCESS_ONCE_RW(rdp->qlen) = 0;
100687 }
100688
100689 /*
100690@@ -2490,7 +2490,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
100691 }
100692 smp_mb(); /* List handling before counting for rcu_barrier(). */
100693 rdp->qlen_lazy -= count_lazy;
100694- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
100695+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
100696 rdp->n_cbs_invoked += count;
100697
100698 /* Reinstate batch limit if we have worked down the excess. */
100699@@ -2647,7 +2647,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
100700 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
100701 return; /* Someone beat us to it. */
100702 }
100703- ACCESS_ONCE(rsp->gp_flags) =
100704+ ACCESS_ONCE_RW(rsp->gp_flags) =
100705 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
100706 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
100707 rcu_gp_kthread_wake(rsp);
100708@@ -2693,7 +2693,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
100709 /*
100710 * Do RCU core processing for the current CPU.
100711 */
100712-static void rcu_process_callbacks(struct softirq_action *unused)
100713+static void rcu_process_callbacks(void)
100714 {
100715 struct rcu_state *rsp;
100716
100717@@ -2805,7 +2805,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
100718 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
100719 if (debug_rcu_head_queue(head)) {
100720 /* Probable double call_rcu(), so leak the callback. */
100721- ACCESS_ONCE(head->func) = rcu_leak_callback;
100722+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
100723 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
100724 return;
100725 }
100726@@ -2833,7 +2833,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
100727 local_irq_restore(flags);
100728 return;
100729 }
100730- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
100731+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
100732 if (lazy)
100733 rdp->qlen_lazy++;
100734 else
100735@@ -3106,11 +3106,11 @@ void synchronize_sched_expedited(void)
100736 * counter wrap on a 32-bit system. Quite a few more CPUs would of
100737 * course be required on a 64-bit system.
100738 */
100739- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
100740+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
100741 (ulong)atomic_long_read(&rsp->expedited_done) +
100742 ULONG_MAX / 8)) {
100743 synchronize_sched();
100744- atomic_long_inc(&rsp->expedited_wrap);
100745+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
100746 return;
100747 }
100748
100749@@ -3118,12 +3118,12 @@ void synchronize_sched_expedited(void)
100750 * Take a ticket. Note that atomic_inc_return() implies a
100751 * full memory barrier.
100752 */
100753- snap = atomic_long_inc_return(&rsp->expedited_start);
100754+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
100755 firstsnap = snap;
100756 if (!try_get_online_cpus()) {
100757 /* CPU hotplug operation in flight, fall back to normal GP. */
100758 wait_rcu_gp(call_rcu_sched);
100759- atomic_long_inc(&rsp->expedited_normal);
100760+ atomic_long_inc_unchecked(&rsp->expedited_normal);
100761 return;
100762 }
100763 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
100764@@ -3136,7 +3136,7 @@ void synchronize_sched_expedited(void)
100765 for_each_cpu(cpu, cm) {
100766 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
100767
100768- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
100769+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
100770 cpumask_clear_cpu(cpu, cm);
100771 }
100772 if (cpumask_weight(cm) == 0)
100773@@ -3151,14 +3151,14 @@ void synchronize_sched_expedited(void)
100774 synchronize_sched_expedited_cpu_stop,
100775 NULL) == -EAGAIN) {
100776 put_online_cpus();
100777- atomic_long_inc(&rsp->expedited_tryfail);
100778+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
100779
100780 /* Check to see if someone else did our work for us. */
100781 s = atomic_long_read(&rsp->expedited_done);
100782 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
100783 /* ensure test happens before caller kfree */
100784 smp_mb__before_atomic(); /* ^^^ */
100785- atomic_long_inc(&rsp->expedited_workdone1);
100786+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
100787 free_cpumask_var(cm);
100788 return;
100789 }
100790@@ -3168,7 +3168,7 @@ void synchronize_sched_expedited(void)
100791 udelay(trycount * num_online_cpus());
100792 } else {
100793 wait_rcu_gp(call_rcu_sched);
100794- atomic_long_inc(&rsp->expedited_normal);
100795+ atomic_long_inc_unchecked(&rsp->expedited_normal);
100796 free_cpumask_var(cm);
100797 return;
100798 }
100799@@ -3178,7 +3178,7 @@ void synchronize_sched_expedited(void)
100800 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
100801 /* ensure test happens before caller kfree */
100802 smp_mb__before_atomic(); /* ^^^ */
100803- atomic_long_inc(&rsp->expedited_workdone2);
100804+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
100805 free_cpumask_var(cm);
100806 return;
100807 }
100808@@ -3193,14 +3193,14 @@ void synchronize_sched_expedited(void)
100809 if (!try_get_online_cpus()) {
100810 /* CPU hotplug operation in flight, use normal GP. */
100811 wait_rcu_gp(call_rcu_sched);
100812- atomic_long_inc(&rsp->expedited_normal);
100813+ atomic_long_inc_unchecked(&rsp->expedited_normal);
100814 free_cpumask_var(cm);
100815 return;
100816 }
100817- snap = atomic_long_read(&rsp->expedited_start);
100818+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
100819 smp_mb(); /* ensure read is before try_stop_cpus(). */
100820 }
100821- atomic_long_inc(&rsp->expedited_stoppedcpus);
100822+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
100823
100824 all_cpus_idle:
100825 free_cpumask_var(cm);
100826@@ -3212,16 +3212,16 @@ all_cpus_idle:
100827 * than we did already did their update.
100828 */
100829 do {
100830- atomic_long_inc(&rsp->expedited_done_tries);
100831+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
100832 s = atomic_long_read(&rsp->expedited_done);
100833 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
100834 /* ensure test happens before caller kfree */
100835 smp_mb__before_atomic(); /* ^^^ */
100836- atomic_long_inc(&rsp->expedited_done_lost);
100837+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
100838 break;
100839 }
100840 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
100841- atomic_long_inc(&rsp->expedited_done_exit);
100842+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
100843
100844 put_online_cpus();
100845 }
100846@@ -3431,7 +3431,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
100847 * ACCESS_ONCE() to prevent the compiler from speculating
100848 * the increment to precede the early-exit check.
100849 */
100850- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100851+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100852 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
100853 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
100854 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
100855@@ -3487,7 +3487,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
100856
100857 /* Increment ->n_barrier_done to prevent duplicate work. */
100858 smp_mb(); /* Keep increment after above mechanism. */
100859- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100860+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100861 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
100862 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
100863 smp_mb(); /* Keep increment before caller's subsequent code. */
100864@@ -3532,7 +3532,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
100865 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
100866 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
100867 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
100868- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
100869+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
100870 rdp->cpu = cpu;
100871 rdp->rsp = rsp;
100872 rcu_boot_init_nocb_percpu_data(rdp);
100873@@ -3565,8 +3565,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
100874 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
100875 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
100876 rcu_sysidle_init_percpu_data(rdp->dynticks);
100877- atomic_set(&rdp->dynticks->dynticks,
100878- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
100879+ atomic_set_unchecked(&rdp->dynticks->dynticks,
100880+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
100881 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
100882
100883 /* Add CPU to rcu_node bitmasks. */
100884diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
100885index 119de39..f07d31a 100644
100886--- a/kernel/rcu/tree.h
100887+++ b/kernel/rcu/tree.h
100888@@ -86,11 +86,11 @@ struct rcu_dynticks {
100889 long long dynticks_nesting; /* Track irq/process nesting level. */
100890 /* Process level is worth LLONG_MAX/2. */
100891 int dynticks_nmi_nesting; /* Track NMI nesting level. */
100892- atomic_t dynticks; /* Even value for idle, else odd. */
100893+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
100894 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
100895 long long dynticks_idle_nesting;
100896 /* irq/process nesting level from idle. */
100897- atomic_t dynticks_idle; /* Even value for idle, else odd. */
100898+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
100899 /* "Idle" excludes userspace execution. */
100900 unsigned long dynticks_idle_jiffies;
100901 /* End of last non-NMI non-idle period. */
100902@@ -457,17 +457,17 @@ struct rcu_state {
100903 /* _rcu_barrier(). */
100904 /* End of fields guarded by barrier_mutex. */
100905
100906- atomic_long_t expedited_start; /* Starting ticket. */
100907- atomic_long_t expedited_done; /* Done ticket. */
100908- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
100909- atomic_long_t expedited_tryfail; /* # acquisition failures. */
100910- atomic_long_t expedited_workdone1; /* # done by others #1. */
100911- atomic_long_t expedited_workdone2; /* # done by others #2. */
100912- atomic_long_t expedited_normal; /* # fallbacks to normal. */
100913- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
100914- atomic_long_t expedited_done_tries; /* # tries to update _done. */
100915- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
100916- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
100917+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
100918+ atomic_long_t expedited_done; /* Done ticket. */
100919+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
100920+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
100921+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
100922+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
100923+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
100924+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
100925+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
100926+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
100927+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
100928
100929 unsigned long jiffies_force_qs; /* Time at which to invoke */
100930 /* force_quiescent_state(). */
100931diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
100932index 0a571e9..fbfd611 100644
100933--- a/kernel/rcu/tree_plugin.h
100934+++ b/kernel/rcu/tree_plugin.h
100935@@ -619,7 +619,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
100936 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
100937 {
100938 return !rcu_preempted_readers_exp(rnp) &&
100939- ACCESS_ONCE(rnp->expmask) == 0;
100940+ ACCESS_ONCE_RW(rnp->expmask) == 0;
100941 }
100942
100943 /*
100944@@ -780,7 +780,7 @@ void synchronize_rcu_expedited(void)
100945
100946 /* Clean up and exit. */
100947 smp_mb(); /* ensure expedited GP seen before counter increment. */
100948- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
100949+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
100950 sync_rcu_preempt_exp_count + 1;
100951 unlock_mb_ret:
100952 mutex_unlock(&sync_rcu_preempt_exp_mutex);
100953@@ -1290,7 +1290,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
100954 free_cpumask_var(cm);
100955 }
100956
100957-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
100958+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
100959 .store = &rcu_cpu_kthread_task,
100960 .thread_should_run = rcu_cpu_kthread_should_run,
100961 .thread_fn = rcu_cpu_kthread,
100962@@ -1761,7 +1761,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
100963 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
100964 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
100965 cpu, ticks_value, ticks_title,
100966- atomic_read(&rdtp->dynticks) & 0xfff,
100967+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
100968 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
100969 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
100970 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
100971@@ -1906,7 +1906,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
100972 return;
100973 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
100974 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
100975- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
100976+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
100977 wake_up(&rdp_leader->nocb_wq);
100978 }
100979 }
100980@@ -1978,7 +1978,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
100981 atomic_long_add(rhcount, &rdp->nocb_q_count);
100982 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
100983 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
100984- ACCESS_ONCE(*old_rhpp) = rhp;
100985+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
100986 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
100987 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
100988
100989@@ -2167,7 +2167,7 @@ wait_again:
100990 continue; /* No CBs here, try next follower. */
100991
100992 /* Move callbacks to wait-for-GP list, which is empty. */
100993- ACCESS_ONCE(rdp->nocb_head) = NULL;
100994+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
100995 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
100996 gotcbs = true;
100997 }
100998@@ -2288,7 +2288,7 @@ static int rcu_nocb_kthread(void *arg)
100999 list = ACCESS_ONCE(rdp->nocb_follower_head);
101000 BUG_ON(!list);
101001 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
101002- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
101003+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
101004 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
101005
101006 /* Each pass through the following loop invokes a callback. */
101007@@ -2338,7 +2338,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
101008 if (!rcu_nocb_need_deferred_wakeup(rdp))
101009 return;
101010 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
101011- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
101012+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
101013 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
101014 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
101015 }
101016@@ -2461,7 +2461,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
101017 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
101018 "rcuo%c/%d", rsp->abbr, cpu);
101019 BUG_ON(IS_ERR(t));
101020- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
101021+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
101022 }
101023
101024 /*
101025@@ -2666,11 +2666,11 @@ static void rcu_sysidle_enter(int irq)
101026
101027 /* Record start of fully idle period. */
101028 j = jiffies;
101029- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
101030+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
101031 smp_mb__before_atomic();
101032- atomic_inc(&rdtp->dynticks_idle);
101033+ atomic_inc_unchecked(&rdtp->dynticks_idle);
101034 smp_mb__after_atomic();
101035- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
101036+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
101037 }
101038
101039 /*
101040@@ -2741,9 +2741,9 @@ static void rcu_sysidle_exit(int irq)
101041
101042 /* Record end of idle period. */
101043 smp_mb__before_atomic();
101044- atomic_inc(&rdtp->dynticks_idle);
101045+ atomic_inc_unchecked(&rdtp->dynticks_idle);
101046 smp_mb__after_atomic();
101047- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
101048+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
101049
101050 /*
101051 * If we are the timekeeping CPU, we are permitted to be non-idle
101052@@ -2788,7 +2788,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
101053 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
101054
101055 /* Pick up current idle and NMI-nesting counter and check. */
101056- cur = atomic_read(&rdtp->dynticks_idle);
101057+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
101058 if (cur & 0x1) {
101059 *isidle = false; /* We are not idle! */
101060 return;
101061@@ -2837,7 +2837,7 @@ static void rcu_sysidle(unsigned long j)
101062 case RCU_SYSIDLE_NOT:
101063
101064 /* First time all are idle, so note a short idle period. */
101065- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
101066+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
101067 break;
101068
101069 case RCU_SYSIDLE_SHORT:
101070@@ -2875,7 +2875,7 @@ static void rcu_sysidle_cancel(void)
101071 {
101072 smp_mb();
101073 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
101074- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
101075+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
101076 }
101077
101078 /*
101079@@ -2927,7 +2927,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
101080 smp_mb(); /* grace period precedes setting inuse. */
101081
101082 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
101083- ACCESS_ONCE(rshp->inuse) = 0;
101084+ ACCESS_ONCE_RW(rshp->inuse) = 0;
101085 }
101086
101087 /*
101088@@ -3080,7 +3080,7 @@ static void rcu_bind_gp_kthread(void)
101089 static void rcu_dynticks_task_enter(void)
101090 {
101091 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
101092- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
101093+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
101094 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
101095 }
101096
101097@@ -3088,6 +3088,6 @@ static void rcu_dynticks_task_enter(void)
101098 static void rcu_dynticks_task_exit(void)
101099 {
101100 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
101101- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
101102+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
101103 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
101104 }
101105diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
101106index fbb6240..f6c5097 100644
101107--- a/kernel/rcu/tree_trace.c
101108+++ b/kernel/rcu/tree_trace.c
101109@@ -125,7 +125,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
101110 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
101111 rdp->qs_pending);
101112 seq_printf(m, " dt=%d/%llx/%d df=%lu",
101113- atomic_read(&rdp->dynticks->dynticks),
101114+ atomic_read_unchecked(&rdp->dynticks->dynticks),
101115 rdp->dynticks->dynticks_nesting,
101116 rdp->dynticks->dynticks_nmi_nesting,
101117 rdp->dynticks_fqs);
101118@@ -186,17 +186,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
101119 struct rcu_state *rsp = (struct rcu_state *)m->private;
101120
101121 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
101122- atomic_long_read(&rsp->expedited_start),
101123+ atomic_long_read_unchecked(&rsp->expedited_start),
101124 atomic_long_read(&rsp->expedited_done),
101125- atomic_long_read(&rsp->expedited_wrap),
101126- atomic_long_read(&rsp->expedited_tryfail),
101127- atomic_long_read(&rsp->expedited_workdone1),
101128- atomic_long_read(&rsp->expedited_workdone2),
101129- atomic_long_read(&rsp->expedited_normal),
101130- atomic_long_read(&rsp->expedited_stoppedcpus),
101131- atomic_long_read(&rsp->expedited_done_tries),
101132- atomic_long_read(&rsp->expedited_done_lost),
101133- atomic_long_read(&rsp->expedited_done_exit));
101134+ atomic_long_read_unchecked(&rsp->expedited_wrap),
101135+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
101136+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
101137+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
101138+ atomic_long_read_unchecked(&rsp->expedited_normal),
101139+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
101140+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
101141+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
101142+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
101143 return 0;
101144 }
101145
101146diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
101147index e0d31a3..f4dafe3 100644
101148--- a/kernel/rcu/update.c
101149+++ b/kernel/rcu/update.c
101150@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
101151 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
101152 */
101153 if (till_stall_check < 3) {
101154- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
101155+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
101156 till_stall_check = 3;
101157 } else if (till_stall_check > 300) {
101158- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
101159+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
101160 till_stall_check = 300;
101161 }
101162 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
101163@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
101164 !ACCESS_ONCE(t->on_rq) ||
101165 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
101166 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
101167- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
101168+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
101169 list_del_init(&t->rcu_tasks_holdout_list);
101170 put_task_struct(t);
101171 return;
101172@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
101173 !is_idle_task(t)) {
101174 get_task_struct(t);
101175 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
101176- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
101177+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
101178 list_add(&t->rcu_tasks_holdout_list,
101179 &rcu_tasks_holdouts);
101180 }
101181@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
101182 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
101183 BUG_ON(IS_ERR(t));
101184 smp_mb(); /* Ensure others see full kthread. */
101185- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
101186+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
101187 mutex_unlock(&rcu_tasks_kthread_mutex);
101188 }
101189
101190diff --git a/kernel/resource.c b/kernel/resource.c
101191index 19f2357..ebe7f35 100644
101192--- a/kernel/resource.c
101193+++ b/kernel/resource.c
101194@@ -162,8 +162,18 @@ static const struct file_operations proc_iomem_operations = {
101195
101196 static int __init ioresources_init(void)
101197 {
101198+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101199+#ifdef CONFIG_GRKERNSEC_PROC_USER
101200+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
101201+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
101202+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
101203+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
101204+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
101205+#endif
101206+#else
101207 proc_create("ioports", 0, NULL, &proc_ioports_operations);
101208 proc_create("iomem", 0, NULL, &proc_iomem_operations);
101209+#endif
101210 return 0;
101211 }
101212 __initcall(ioresources_init);
101213diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
101214index eae160d..c9aa22e 100644
101215--- a/kernel/sched/auto_group.c
101216+++ b/kernel/sched/auto_group.c
101217@@ -11,7 +11,7 @@
101218
101219 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
101220 static struct autogroup autogroup_default;
101221-static atomic_t autogroup_seq_nr;
101222+static atomic_unchecked_t autogroup_seq_nr;
101223
101224 void __init autogroup_init(struct task_struct *init_task)
101225 {
101226@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
101227
101228 kref_init(&ag->kref);
101229 init_rwsem(&ag->lock);
101230- ag->id = atomic_inc_return(&autogroup_seq_nr);
101231+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
101232 ag->tg = tg;
101233 #ifdef CONFIG_RT_GROUP_SCHED
101234 /*
101235diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
101236index 8d0f35d..c16360d 100644
101237--- a/kernel/sched/completion.c
101238+++ b/kernel/sched/completion.c
101239@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
101240 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
101241 * or number of jiffies left till timeout) if completed.
101242 */
101243-long __sched
101244+long __sched __intentional_overflow(-1)
101245 wait_for_completion_interruptible_timeout(struct completion *x,
101246 unsigned long timeout)
101247 {
101248@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
101249 *
101250 * Return: -ERESTARTSYS if interrupted, 0 if completed.
101251 */
101252-int __sched wait_for_completion_killable(struct completion *x)
101253+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
101254 {
101255 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
101256 if (t == -ERESTARTSYS)
101257@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
101258 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
101259 * or number of jiffies left till timeout) if completed.
101260 */
101261-long __sched
101262+long __sched __intentional_overflow(-1)
101263 wait_for_completion_killable_timeout(struct completion *x,
101264 unsigned long timeout)
101265 {
101266diff --git a/kernel/sched/core.c b/kernel/sched/core.c
101267index 3d5f6f6..a94298f 100644
101268--- a/kernel/sched/core.c
101269+++ b/kernel/sched/core.c
101270@@ -1862,7 +1862,7 @@ void set_numabalancing_state(bool enabled)
101271 int sysctl_numa_balancing(struct ctl_table *table, int write,
101272 void __user *buffer, size_t *lenp, loff_t *ppos)
101273 {
101274- struct ctl_table t;
101275+ ctl_table_no_const t;
101276 int err;
101277 int state = numabalancing_enabled;
101278
101279@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
101280 next->active_mm = oldmm;
101281 atomic_inc(&oldmm->mm_count);
101282 enter_lazy_tlb(oldmm, next);
101283- } else
101284+ } else {
101285 switch_mm(oldmm, mm, next);
101286+ populate_stack();
101287+ }
101288
101289 if (!prev->mm) {
101290 prev->active_mm = NULL;
101291@@ -3124,6 +3126,8 @@ int can_nice(const struct task_struct *p, const int nice)
101292 /* convert nice value [19,-20] to rlimit style value [1,40] */
101293 int nice_rlim = nice_to_rlimit(nice);
101294
101295+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
101296+
101297 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
101298 capable(CAP_SYS_NICE));
101299 }
101300@@ -3150,7 +3154,8 @@ SYSCALL_DEFINE1(nice, int, increment)
101301 nice = task_nice(current) + increment;
101302
101303 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
101304- if (increment < 0 && !can_nice(current, nice))
101305+ if (increment < 0 && (!can_nice(current, nice) ||
101306+ gr_handle_chroot_nice()))
101307 return -EPERM;
101308
101309 retval = security_task_setnice(current, nice);
101310@@ -3459,6 +3464,7 @@ recheck:
101311 if (policy != p->policy && !rlim_rtprio)
101312 return -EPERM;
101313
101314+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
101315 /* can't increase priority */
101316 if (attr->sched_priority > p->rt_priority &&
101317 attr->sched_priority > rlim_rtprio)
101318@@ -4946,6 +4952,7 @@ void idle_task_exit(void)
101319
101320 if (mm != &init_mm) {
101321 switch_mm(mm, &init_mm, current);
101322+ populate_stack();
101323 finish_arch_post_lock_switch();
101324 }
101325 mmdrop(mm);
101326@@ -5041,7 +5048,7 @@ static void migrate_tasks(unsigned int dead_cpu)
101327
101328 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
101329
101330-static struct ctl_table sd_ctl_dir[] = {
101331+static ctl_table_no_const sd_ctl_dir[] __read_only = {
101332 {
101333 .procname = "sched_domain",
101334 .mode = 0555,
101335@@ -5058,17 +5065,17 @@ static struct ctl_table sd_ctl_root[] = {
101336 {}
101337 };
101338
101339-static struct ctl_table *sd_alloc_ctl_entry(int n)
101340+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
101341 {
101342- struct ctl_table *entry =
101343+ ctl_table_no_const *entry =
101344 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
101345
101346 return entry;
101347 }
101348
101349-static void sd_free_ctl_entry(struct ctl_table **tablep)
101350+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
101351 {
101352- struct ctl_table *entry;
101353+ ctl_table_no_const *entry;
101354
101355 /*
101356 * In the intermediate directories, both the child directory and
101357@@ -5076,22 +5083,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
101358 * will always be set. In the lowest directory the names are
101359 * static strings and all have proc handlers.
101360 */
101361- for (entry = *tablep; entry->mode; entry++) {
101362- if (entry->child)
101363- sd_free_ctl_entry(&entry->child);
101364+ for (entry = tablep; entry->mode; entry++) {
101365+ if (entry->child) {
101366+ sd_free_ctl_entry(entry->child);
101367+ pax_open_kernel();
101368+ entry->child = NULL;
101369+ pax_close_kernel();
101370+ }
101371 if (entry->proc_handler == NULL)
101372 kfree(entry->procname);
101373 }
101374
101375- kfree(*tablep);
101376- *tablep = NULL;
101377+ kfree(tablep);
101378 }
101379
101380 static int min_load_idx = 0;
101381 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
101382
101383 static void
101384-set_table_entry(struct ctl_table *entry,
101385+set_table_entry(ctl_table_no_const *entry,
101386 const char *procname, void *data, int maxlen,
101387 umode_t mode, proc_handler *proc_handler,
101388 bool load_idx)
101389@@ -5111,7 +5121,7 @@ set_table_entry(struct ctl_table *entry,
101390 static struct ctl_table *
101391 sd_alloc_ctl_domain_table(struct sched_domain *sd)
101392 {
101393- struct ctl_table *table = sd_alloc_ctl_entry(14);
101394+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
101395
101396 if (table == NULL)
101397 return NULL;
101398@@ -5149,9 +5159,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
101399 return table;
101400 }
101401
101402-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
101403+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
101404 {
101405- struct ctl_table *entry, *table;
101406+ ctl_table_no_const *entry, *table;
101407 struct sched_domain *sd;
101408 int domain_num = 0, i;
101409 char buf[32];
101410@@ -5178,11 +5188,13 @@ static struct ctl_table_header *sd_sysctl_header;
101411 static void register_sched_domain_sysctl(void)
101412 {
101413 int i, cpu_num = num_possible_cpus();
101414- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
101415+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
101416 char buf[32];
101417
101418 WARN_ON(sd_ctl_dir[0].child);
101419+ pax_open_kernel();
101420 sd_ctl_dir[0].child = entry;
101421+ pax_close_kernel();
101422
101423 if (entry == NULL)
101424 return;
101425@@ -5205,8 +5217,12 @@ static void unregister_sched_domain_sysctl(void)
101426 if (sd_sysctl_header)
101427 unregister_sysctl_table(sd_sysctl_header);
101428 sd_sysctl_header = NULL;
101429- if (sd_ctl_dir[0].child)
101430- sd_free_ctl_entry(&sd_ctl_dir[0].child);
101431+ if (sd_ctl_dir[0].child) {
101432+ sd_free_ctl_entry(sd_ctl_dir[0].child);
101433+ pax_open_kernel();
101434+ sd_ctl_dir[0].child = NULL;
101435+ pax_close_kernel();
101436+ }
101437 }
101438 #else
101439 static void register_sched_domain_sysctl(void)
101440diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
101441index 241213b..6a64c91 100644
101442--- a/kernel/sched/fair.c
101443+++ b/kernel/sched/fair.c
101444@@ -2092,7 +2092,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
101445
101446 static void reset_ptenuma_scan(struct task_struct *p)
101447 {
101448- ACCESS_ONCE(p->mm->numa_scan_seq)++;
101449+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
101450 p->mm->numa_scan_offset = 0;
101451 }
101452
101453@@ -7656,7 +7656,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
101454 * run_rebalance_domains is triggered when needed from the scheduler tick.
101455 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
101456 */
101457-static void run_rebalance_domains(struct softirq_action *h)
101458+static __latent_entropy void run_rebalance_domains(void)
101459 {
101460 struct rq *this_rq = this_rq();
101461 enum cpu_idle_type idle = this_rq->idle_balance ?
101462diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
101463index dc0f435..ae2e085 100644
101464--- a/kernel/sched/sched.h
101465+++ b/kernel/sched/sched.h
101466@@ -1200,7 +1200,7 @@ struct sched_class {
101467 #ifdef CONFIG_FAIR_GROUP_SCHED
101468 void (*task_move_group) (struct task_struct *p, int on_rq);
101469 #endif
101470-};
101471+} __do_const;
101472
101473 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
101474 {
101475diff --git a/kernel/signal.c b/kernel/signal.c
101476index a390499..ebe9a21 100644
101477--- a/kernel/signal.c
101478+++ b/kernel/signal.c
101479@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
101480
101481 int print_fatal_signals __read_mostly;
101482
101483-static void __user *sig_handler(struct task_struct *t, int sig)
101484+static __sighandler_t sig_handler(struct task_struct *t, int sig)
101485 {
101486 return t->sighand->action[sig - 1].sa.sa_handler;
101487 }
101488
101489-static int sig_handler_ignored(void __user *handler, int sig)
101490+static int sig_handler_ignored(__sighandler_t handler, int sig)
101491 {
101492 /* Is it explicitly or implicitly ignored? */
101493 return handler == SIG_IGN ||
101494@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
101495
101496 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
101497 {
101498- void __user *handler;
101499+ __sighandler_t handler;
101500
101501 handler = sig_handler(t, sig);
101502
101503@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
101504 atomic_inc(&user->sigpending);
101505 rcu_read_unlock();
101506
101507+ if (!override_rlimit)
101508+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
101509+
101510 if (override_rlimit ||
101511 atomic_read(&user->sigpending) <=
101512 task_rlimit(t, RLIMIT_SIGPENDING)) {
101513@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
101514
101515 int unhandled_signal(struct task_struct *tsk, int sig)
101516 {
101517- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
101518+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
101519 if (is_global_init(tsk))
101520 return 1;
101521 if (handler != SIG_IGN && handler != SIG_DFL)
101522@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
101523 }
101524 }
101525
101526+ /* allow glibc communication via tgkill to other threads in our
101527+ thread group */
101528+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
101529+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
101530+ && gr_handle_signal(t, sig))
101531+ return -EPERM;
101532+
101533 return security_task_kill(t, info, sig, 0);
101534 }
101535
101536@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
101537 return send_signal(sig, info, p, 1);
101538 }
101539
101540-static int
101541+int
101542 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
101543 {
101544 return send_signal(sig, info, t, 0);
101545@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
101546 unsigned long int flags;
101547 int ret, blocked, ignored;
101548 struct k_sigaction *action;
101549+ int is_unhandled = 0;
101550
101551 spin_lock_irqsave(&t->sighand->siglock, flags);
101552 action = &t->sighand->action[sig-1];
101553@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
101554 }
101555 if (action->sa.sa_handler == SIG_DFL)
101556 t->signal->flags &= ~SIGNAL_UNKILLABLE;
101557+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
101558+ is_unhandled = 1;
101559 ret = specific_send_sig_info(sig, info, t);
101560 spin_unlock_irqrestore(&t->sighand->siglock, flags);
101561
101562+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
101563+ normal operation */
101564+ if (is_unhandled) {
101565+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
101566+ gr_handle_crash(t, sig);
101567+ }
101568+
101569 return ret;
101570 }
101571
101572@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
101573 ret = check_kill_permission(sig, info, p);
101574 rcu_read_unlock();
101575
101576- if (!ret && sig)
101577+ if (!ret && sig) {
101578 ret = do_send_sig_info(sig, info, p, true);
101579+ if (!ret)
101580+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
101581+ }
101582
101583 return ret;
101584 }
101585@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
101586 int error = -ESRCH;
101587
101588 rcu_read_lock();
101589- p = find_task_by_vpid(pid);
101590+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
101591+ /* allow glibc communication via tgkill to other threads in our
101592+ thread group */
101593+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
101594+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
101595+ p = find_task_by_vpid_unrestricted(pid);
101596+ else
101597+#endif
101598+ p = find_task_by_vpid(pid);
101599 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
101600 error = check_kill_permission(sig, info, p);
101601 /*
101602@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
101603 }
101604 seg = get_fs();
101605 set_fs(KERNEL_DS);
101606- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
101607- (stack_t __force __user *) &uoss,
101608+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
101609+ (stack_t __force_user *) &uoss,
101610 compat_user_stack_pointer());
101611 set_fs(seg);
101612 if (ret >= 0 && uoss_ptr) {
101613diff --git a/kernel/smpboot.c b/kernel/smpboot.c
101614index 40190f2..8861d40 100644
101615--- a/kernel/smpboot.c
101616+++ b/kernel/smpboot.c
101617@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
101618 }
101619 smpboot_unpark_thread(plug_thread, cpu);
101620 }
101621- list_add(&plug_thread->list, &hotplug_threads);
101622+ pax_list_add(&plug_thread->list, &hotplug_threads);
101623 out:
101624 mutex_unlock(&smpboot_threads_lock);
101625 put_online_cpus();
101626@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
101627 {
101628 get_online_cpus();
101629 mutex_lock(&smpboot_threads_lock);
101630- list_del(&plug_thread->list);
101631+ pax_list_del(&plug_thread->list);
101632 smpboot_destroy_threads(plug_thread);
101633 mutex_unlock(&smpboot_threads_lock);
101634 put_online_cpus();
101635diff --git a/kernel/softirq.c b/kernel/softirq.c
101636index 479e443..66d845e1 100644
101637--- a/kernel/softirq.c
101638+++ b/kernel/softirq.c
101639@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
101640 EXPORT_SYMBOL(irq_stat);
101641 #endif
101642
101643-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
101644+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
101645
101646 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
101647
101648@@ -270,7 +270,7 @@ restart:
101649 kstat_incr_softirqs_this_cpu(vec_nr);
101650
101651 trace_softirq_entry(vec_nr);
101652- h->action(h);
101653+ h->action();
101654 trace_softirq_exit(vec_nr);
101655 if (unlikely(prev_count != preempt_count())) {
101656 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
101657@@ -430,7 +430,7 @@ void __raise_softirq_irqoff(unsigned int nr)
101658 or_softirq_pending(1UL << nr);
101659 }
101660
101661-void open_softirq(int nr, void (*action)(struct softirq_action *))
101662+void __init open_softirq(int nr, void (*action)(void))
101663 {
101664 softirq_vec[nr].action = action;
101665 }
101666@@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
101667 }
101668 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
101669
101670-static void tasklet_action(struct softirq_action *a)
101671+static void tasklet_action(void)
101672 {
101673 struct tasklet_struct *list;
101674
101675@@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a)
101676 }
101677 }
101678
101679-static void tasklet_hi_action(struct softirq_action *a)
101680+static __latent_entropy void tasklet_hi_action(void)
101681 {
101682 struct tasklet_struct *list;
101683
101684@@ -744,7 +744,7 @@ static struct notifier_block cpu_nfb = {
101685 .notifier_call = cpu_callback
101686 };
101687
101688-static struct smp_hotplug_thread softirq_threads = {
101689+static struct smp_hotplug_thread softirq_threads __read_only = {
101690 .store = &ksoftirqd,
101691 .thread_should_run = ksoftirqd_should_run,
101692 .thread_fn = run_ksoftirqd,
101693diff --git a/kernel/sys.c b/kernel/sys.c
101694index a03d9cd..55dbe9c 100644
101695--- a/kernel/sys.c
101696+++ b/kernel/sys.c
101697@@ -160,6 +160,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
101698 error = -EACCES;
101699 goto out;
101700 }
101701+
101702+ if (gr_handle_chroot_setpriority(p, niceval)) {
101703+ error = -EACCES;
101704+ goto out;
101705+ }
101706+
101707 no_nice = security_task_setnice(p, niceval);
101708 if (no_nice) {
101709 error = no_nice;
101710@@ -365,6 +371,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
101711 goto error;
101712 }
101713
101714+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
101715+ goto error;
101716+
101717+ if (!gid_eq(new->gid, old->gid)) {
101718+ /* make sure we generate a learn log for what will
101719+ end up being a role transition after a full-learning
101720+ policy is generated
101721+ CAP_SETGID is required to perform a transition
101722+ we may not log a CAP_SETGID check above, e.g.
101723+ in the case where new rgid = old egid
101724+ */
101725+ gr_learn_cap(current, new, CAP_SETGID);
101726+ }
101727+
101728 if (rgid != (gid_t) -1 ||
101729 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
101730 new->sgid = new->egid;
101731@@ -400,6 +420,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
101732 old = current_cred();
101733
101734 retval = -EPERM;
101735+
101736+ if (gr_check_group_change(kgid, kgid, kgid))
101737+ goto error;
101738+
101739 if (ns_capable(old->user_ns, CAP_SETGID))
101740 new->gid = new->egid = new->sgid = new->fsgid = kgid;
101741 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
101742@@ -417,7 +441,7 @@ error:
101743 /*
101744 * change the user struct in a credentials set to match the new UID
101745 */
101746-static int set_user(struct cred *new)
101747+int set_user(struct cred *new)
101748 {
101749 struct user_struct *new_user;
101750
101751@@ -497,7 +521,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
101752 goto error;
101753 }
101754
101755+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
101756+ goto error;
101757+
101758 if (!uid_eq(new->uid, old->uid)) {
101759+ /* make sure we generate a learn log for what will
101760+ end up being a role transition after a full-learning
101761+ policy is generated
101762+ CAP_SETUID is required to perform a transition
101763+ we may not log a CAP_SETUID check above, e.g.
101764+ in the case where new ruid = old euid
101765+ */
101766+ gr_learn_cap(current, new, CAP_SETUID);
101767 retval = set_user(new);
101768 if (retval < 0)
101769 goto error;
101770@@ -547,6 +582,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
101771 old = current_cred();
101772
101773 retval = -EPERM;
101774+
101775+ if (gr_check_crash_uid(kuid))
101776+ goto error;
101777+ if (gr_check_user_change(kuid, kuid, kuid))
101778+ goto error;
101779+
101780 if (ns_capable(old->user_ns, CAP_SETUID)) {
101781 new->suid = new->uid = kuid;
101782 if (!uid_eq(kuid, old->uid)) {
101783@@ -616,6 +657,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
101784 goto error;
101785 }
101786
101787+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
101788+ goto error;
101789+
101790 if (ruid != (uid_t) -1) {
101791 new->uid = kruid;
101792 if (!uid_eq(kruid, old->uid)) {
101793@@ -700,6 +744,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
101794 goto error;
101795 }
101796
101797+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
101798+ goto error;
101799+
101800 if (rgid != (gid_t) -1)
101801 new->gid = krgid;
101802 if (egid != (gid_t) -1)
101803@@ -764,12 +811,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
101804 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
101805 ns_capable(old->user_ns, CAP_SETUID)) {
101806 if (!uid_eq(kuid, old->fsuid)) {
101807+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
101808+ goto error;
101809+
101810 new->fsuid = kuid;
101811 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
101812 goto change_okay;
101813 }
101814 }
101815
101816+error:
101817 abort_creds(new);
101818 return old_fsuid;
101819
101820@@ -802,12 +853,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
101821 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
101822 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
101823 ns_capable(old->user_ns, CAP_SETGID)) {
101824+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
101825+ goto error;
101826+
101827 if (!gid_eq(kgid, old->fsgid)) {
101828 new->fsgid = kgid;
101829 goto change_okay;
101830 }
101831 }
101832
101833+error:
101834 abort_creds(new);
101835 return old_fsgid;
101836
101837@@ -1185,19 +1240,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
101838 return -EFAULT;
101839
101840 down_read(&uts_sem);
101841- error = __copy_to_user(&name->sysname, &utsname()->sysname,
101842+ error = __copy_to_user(name->sysname, &utsname()->sysname,
101843 __OLD_UTS_LEN);
101844 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
101845- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
101846+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
101847 __OLD_UTS_LEN);
101848 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
101849- error |= __copy_to_user(&name->release, &utsname()->release,
101850+ error |= __copy_to_user(name->release, &utsname()->release,
101851 __OLD_UTS_LEN);
101852 error |= __put_user(0, name->release + __OLD_UTS_LEN);
101853- error |= __copy_to_user(&name->version, &utsname()->version,
101854+ error |= __copy_to_user(name->version, &utsname()->version,
101855 __OLD_UTS_LEN);
101856 error |= __put_user(0, name->version + __OLD_UTS_LEN);
101857- error |= __copy_to_user(&name->machine, &utsname()->machine,
101858+ error |= __copy_to_user(name->machine, &utsname()->machine,
101859 __OLD_UTS_LEN);
101860 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
101861 up_read(&uts_sem);
101862@@ -1398,6 +1453,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
101863 */
101864 new_rlim->rlim_cur = 1;
101865 }
101866+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
101867+ is changed to a lower value. Since tasks can be created by the same
101868+ user in between this limit change and an execve by this task, force
101869+ a recheck only for this task by setting PF_NPROC_EXCEEDED
101870+ */
101871+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
101872+ tsk->flags |= PF_NPROC_EXCEEDED;
101873 }
101874 if (!retval) {
101875 if (old_rlim)
101876diff --git a/kernel/sysctl.c b/kernel/sysctl.c
101877index ce410bb..cd276f0 100644
101878--- a/kernel/sysctl.c
101879+++ b/kernel/sysctl.c
101880@@ -94,7 +94,6 @@
101881
101882
101883 #if defined(CONFIG_SYSCTL)
101884-
101885 /* External variables not in a header file. */
101886 extern int max_threads;
101887 extern int suid_dumpable;
101888@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
101889
101890 /* Constants used for minimum and maximum */
101891 #ifdef CONFIG_LOCKUP_DETECTOR
101892-static int sixty = 60;
101893+static int sixty __read_only = 60;
101894 #endif
101895
101896-static int __maybe_unused neg_one = -1;
101897+static int __maybe_unused neg_one __read_only = -1;
101898
101899-static int zero;
101900-static int __maybe_unused one = 1;
101901-static int __maybe_unused two = 2;
101902-static int __maybe_unused four = 4;
101903-static unsigned long one_ul = 1;
101904-static int one_hundred = 100;
101905+static int zero __read_only = 0;
101906+static int __maybe_unused one __read_only = 1;
101907+static int __maybe_unused two __read_only = 2;
101908+static int __maybe_unused three __read_only = 3;
101909+static int __maybe_unused four __read_only = 4;
101910+static unsigned long one_ul __read_only = 1;
101911+static int one_hundred __read_only = 100;
101912 #ifdef CONFIG_PRINTK
101913-static int ten_thousand = 10000;
101914+static int ten_thousand __read_only = 10000;
101915 #endif
101916
101917 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
101918@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
101919 void __user *buffer, size_t *lenp, loff_t *ppos);
101920 #endif
101921
101922-#ifdef CONFIG_PRINTK
101923 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
101924 void __user *buffer, size_t *lenp, loff_t *ppos);
101925-#endif
101926
101927 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
101928 void __user *buffer, size_t *lenp, loff_t *ppos);
101929@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
101930
101931 #endif
101932
101933+extern struct ctl_table grsecurity_table[];
101934+
101935 static struct ctl_table kern_table[];
101936 static struct ctl_table vm_table[];
101937 static struct ctl_table fs_table[];
101938@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
101939 int sysctl_legacy_va_layout;
101940 #endif
101941
101942+#ifdef CONFIG_PAX_SOFTMODE
101943+static struct ctl_table pax_table[] = {
101944+ {
101945+ .procname = "softmode",
101946+ .data = &pax_softmode,
101947+ .maxlen = sizeof(unsigned int),
101948+ .mode = 0600,
101949+ .proc_handler = &proc_dointvec,
101950+ },
101951+
101952+ { }
101953+};
101954+#endif
101955+
101956 /* The default sysctl tables: */
101957
101958 static struct ctl_table sysctl_base_table[] = {
101959@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
101960 #endif
101961
101962 static struct ctl_table kern_table[] = {
101963+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
101964+ {
101965+ .procname = "grsecurity",
101966+ .mode = 0500,
101967+ .child = grsecurity_table,
101968+ },
101969+#endif
101970+
101971+#ifdef CONFIG_PAX_SOFTMODE
101972+ {
101973+ .procname = "pax",
101974+ .mode = 0500,
101975+ .child = pax_table,
101976+ },
101977+#endif
101978+
101979 {
101980 .procname = "sched_child_runs_first",
101981 .data = &sysctl_sched_child_runs_first,
101982@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
101983 .data = &modprobe_path,
101984 .maxlen = KMOD_PATH_LEN,
101985 .mode = 0644,
101986- .proc_handler = proc_dostring,
101987+ .proc_handler = proc_dostring_modpriv,
101988 },
101989 {
101990 .procname = "modules_disabled",
101991@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
101992 .extra1 = &zero,
101993 .extra2 = &one,
101994 },
101995+#endif
101996 {
101997 .procname = "kptr_restrict",
101998 .data = &kptr_restrict,
101999 .maxlen = sizeof(int),
102000 .mode = 0644,
102001 .proc_handler = proc_dointvec_minmax_sysadmin,
102002+#ifdef CONFIG_GRKERNSEC_HIDESYM
102003+ .extra1 = &two,
102004+#else
102005 .extra1 = &zero,
102006+#endif
102007 .extra2 = &two,
102008 },
102009-#endif
102010 {
102011 .procname = "ngroups_max",
102012 .data = &ngroups_max,
102013@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
102014 */
102015 {
102016 .procname = "perf_event_paranoid",
102017- .data = &sysctl_perf_event_paranoid,
102018- .maxlen = sizeof(sysctl_perf_event_paranoid),
102019+ .data = &sysctl_perf_event_legitimately_concerned,
102020+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
102021 .mode = 0644,
102022- .proc_handler = proc_dointvec,
102023+ /* go ahead, be a hero */
102024+ .proc_handler = proc_dointvec_minmax_sysadmin,
102025+ .extra1 = &neg_one,
102026+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
102027+ .extra2 = &three,
102028+#else
102029+ .extra2 = &two,
102030+#endif
102031 },
102032 {
102033 .procname = "perf_event_mlock_kb",
102034@@ -1348,6 +1389,13 @@ static struct ctl_table vm_table[] = {
102035 .proc_handler = proc_dointvec_minmax,
102036 .extra1 = &zero,
102037 },
102038+ {
102039+ .procname = "heap_stack_gap",
102040+ .data = &sysctl_heap_stack_gap,
102041+ .maxlen = sizeof(sysctl_heap_stack_gap),
102042+ .mode = 0644,
102043+ .proc_handler = proc_doulongvec_minmax,
102044+ },
102045 #else
102046 {
102047 .procname = "nr_trim_pages",
102048@@ -1830,6 +1878,16 @@ int proc_dostring(struct ctl_table *table, int write,
102049 (char __user *)buffer, lenp, ppos);
102050 }
102051
102052+int proc_dostring_modpriv(struct ctl_table *table, int write,
102053+ void __user *buffer, size_t *lenp, loff_t *ppos)
102054+{
102055+ if (write && !capable(CAP_SYS_MODULE))
102056+ return -EPERM;
102057+
102058+ return _proc_do_string(table->data, table->maxlen, write,
102059+ buffer, lenp, ppos);
102060+}
102061+
102062 static size_t proc_skip_spaces(char **buf)
102063 {
102064 size_t ret;
102065@@ -1935,6 +1993,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
102066 len = strlen(tmp);
102067 if (len > *size)
102068 len = *size;
102069+ if (len > sizeof(tmp))
102070+ len = sizeof(tmp);
102071 if (copy_to_user(*buf, tmp, len))
102072 return -EFAULT;
102073 *size -= len;
102074@@ -2112,7 +2172,7 @@ int proc_dointvec(struct ctl_table *table, int write,
102075 static int proc_taint(struct ctl_table *table, int write,
102076 void __user *buffer, size_t *lenp, loff_t *ppos)
102077 {
102078- struct ctl_table t;
102079+ ctl_table_no_const t;
102080 unsigned long tmptaint = get_taint();
102081 int err;
102082
102083@@ -2140,7 +2200,6 @@ static int proc_taint(struct ctl_table *table, int write,
102084 return err;
102085 }
102086
102087-#ifdef CONFIG_PRINTK
102088 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
102089 void __user *buffer, size_t *lenp, loff_t *ppos)
102090 {
102091@@ -2149,7 +2208,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
102092
102093 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
102094 }
102095-#endif
102096
102097 struct do_proc_dointvec_minmax_conv_param {
102098 int *min;
102099@@ -2709,6 +2767,12 @@ int proc_dostring(struct ctl_table *table, int write,
102100 return -ENOSYS;
102101 }
102102
102103+int proc_dostring_modpriv(struct ctl_table *table, int write,
102104+ void __user *buffer, size_t *lenp, loff_t *ppos)
102105+{
102106+ return -ENOSYS;
102107+}
102108+
102109 int proc_dointvec(struct ctl_table *table, int write,
102110 void __user *buffer, size_t *lenp, loff_t *ppos)
102111 {
102112@@ -2765,5 +2829,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
102113 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
102114 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
102115 EXPORT_SYMBOL(proc_dostring);
102116+EXPORT_SYMBOL(proc_dostring_modpriv);
102117 EXPORT_SYMBOL(proc_doulongvec_minmax);
102118 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
102119diff --git a/kernel/taskstats.c b/kernel/taskstats.c
102120index 21f82c2..c1984e5 100644
102121--- a/kernel/taskstats.c
102122+++ b/kernel/taskstats.c
102123@@ -28,9 +28,12 @@
102124 #include <linux/fs.h>
102125 #include <linux/file.h>
102126 #include <linux/pid_namespace.h>
102127+#include <linux/grsecurity.h>
102128 #include <net/genetlink.h>
102129 #include <linux/atomic.h>
102130
102131+extern int gr_is_taskstats_denied(int pid);
102132+
102133 /*
102134 * Maximum length of a cpumask that can be specified in
102135 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
102136@@ -567,6 +570,9 @@ err:
102137
102138 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
102139 {
102140+ if (gr_is_taskstats_denied(current->pid))
102141+ return -EACCES;
102142+
102143 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
102144 return cmd_attr_register_cpumask(info);
102145 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
102146diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
102147index 1b001ed..55ef9e4 100644
102148--- a/kernel/time/alarmtimer.c
102149+++ b/kernel/time/alarmtimer.c
102150@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
102151 struct platform_device *pdev;
102152 int error = 0;
102153 int i;
102154- struct k_clock alarm_clock = {
102155+ static struct k_clock alarm_clock = {
102156 .clock_getres = alarm_clock_getres,
102157 .clock_get = alarm_clock_get,
102158 .timer_create = alarm_timer_create,
102159diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
102160index bee0c1f..a23fe2d 100644
102161--- a/kernel/time/hrtimer.c
102162+++ b/kernel/time/hrtimer.c
102163@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
102164 local_irq_restore(flags);
102165 }
102166
102167-static void run_hrtimer_softirq(struct softirq_action *h)
102168+static __latent_entropy void run_hrtimer_softirq(void)
102169 {
102170 hrtimer_peek_ahead_timers();
102171 }
102172diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
102173index 0075da7..63cc872 100644
102174--- a/kernel/time/posix-cpu-timers.c
102175+++ b/kernel/time/posix-cpu-timers.c
102176@@ -1449,14 +1449,14 @@ struct k_clock clock_posix_cpu = {
102177
102178 static __init int init_posix_cpu_timers(void)
102179 {
102180- struct k_clock process = {
102181+ static struct k_clock process = {
102182 .clock_getres = process_cpu_clock_getres,
102183 .clock_get = process_cpu_clock_get,
102184 .timer_create = process_cpu_timer_create,
102185 .nsleep = process_cpu_nsleep,
102186 .nsleep_restart = process_cpu_nsleep_restart,
102187 };
102188- struct k_clock thread = {
102189+ static struct k_clock thread = {
102190 .clock_getres = thread_cpu_clock_getres,
102191 .clock_get = thread_cpu_clock_get,
102192 .timer_create = thread_cpu_timer_create,
102193diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
102194index 31ea01f..7fc61ef 100644
102195--- a/kernel/time/posix-timers.c
102196+++ b/kernel/time/posix-timers.c
102197@@ -43,6 +43,7 @@
102198 #include <linux/hash.h>
102199 #include <linux/posix-clock.h>
102200 #include <linux/posix-timers.h>
102201+#include <linux/grsecurity.h>
102202 #include <linux/syscalls.h>
102203 #include <linux/wait.h>
102204 #include <linux/workqueue.h>
102205@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
102206 * which we beg off on and pass to do_sys_settimeofday().
102207 */
102208
102209-static struct k_clock posix_clocks[MAX_CLOCKS];
102210+static struct k_clock *posix_clocks[MAX_CLOCKS];
102211
102212 /*
102213 * These ones are defined below.
102214@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
102215 */
102216 static __init int init_posix_timers(void)
102217 {
102218- struct k_clock clock_realtime = {
102219+ static struct k_clock clock_realtime = {
102220 .clock_getres = hrtimer_get_res,
102221 .clock_get = posix_clock_realtime_get,
102222 .clock_set = posix_clock_realtime_set,
102223@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
102224 .timer_get = common_timer_get,
102225 .timer_del = common_timer_del,
102226 };
102227- struct k_clock clock_monotonic = {
102228+ static struct k_clock clock_monotonic = {
102229 .clock_getres = hrtimer_get_res,
102230 .clock_get = posix_ktime_get_ts,
102231 .nsleep = common_nsleep,
102232@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
102233 .timer_get = common_timer_get,
102234 .timer_del = common_timer_del,
102235 };
102236- struct k_clock clock_monotonic_raw = {
102237+ static struct k_clock clock_monotonic_raw = {
102238 .clock_getres = hrtimer_get_res,
102239 .clock_get = posix_get_monotonic_raw,
102240 };
102241- struct k_clock clock_realtime_coarse = {
102242+ static struct k_clock clock_realtime_coarse = {
102243 .clock_getres = posix_get_coarse_res,
102244 .clock_get = posix_get_realtime_coarse,
102245 };
102246- struct k_clock clock_monotonic_coarse = {
102247+ static struct k_clock clock_monotonic_coarse = {
102248 .clock_getres = posix_get_coarse_res,
102249 .clock_get = posix_get_monotonic_coarse,
102250 };
102251- struct k_clock clock_tai = {
102252+ static struct k_clock clock_tai = {
102253 .clock_getres = hrtimer_get_res,
102254 .clock_get = posix_get_tai,
102255 .nsleep = common_nsleep,
102256@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
102257 .timer_get = common_timer_get,
102258 .timer_del = common_timer_del,
102259 };
102260- struct k_clock clock_boottime = {
102261+ static struct k_clock clock_boottime = {
102262 .clock_getres = hrtimer_get_res,
102263 .clock_get = posix_get_boottime,
102264 .nsleep = common_nsleep,
102265@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
102266 return;
102267 }
102268
102269- posix_clocks[clock_id] = *new_clock;
102270+ posix_clocks[clock_id] = new_clock;
102271 }
102272 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
102273
102274@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
102275 return (id & CLOCKFD_MASK) == CLOCKFD ?
102276 &clock_posix_dynamic : &clock_posix_cpu;
102277
102278- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
102279+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
102280 return NULL;
102281- return &posix_clocks[id];
102282+ return posix_clocks[id];
102283 }
102284
102285 static int common_timer_create(struct k_itimer *new_timer)
102286@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
102287 struct k_clock *kc = clockid_to_kclock(which_clock);
102288 struct k_itimer *new_timer;
102289 int error, new_timer_id;
102290- sigevent_t event;
102291+ sigevent_t event = { };
102292 int it_id_set = IT_ID_NOT_SET;
102293
102294 if (!kc)
102295@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
102296 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
102297 return -EFAULT;
102298
102299+ /* only the CLOCK_REALTIME clock can be set, all other clocks
102300+ have their clock_set fptr set to a nosettime dummy function
102301+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
102302+ call common_clock_set, which calls do_sys_settimeofday, which
102303+ we hook
102304+ */
102305+
102306 return kc->clock_set(which_clock, &new_tp);
102307 }
102308
102309diff --git a/kernel/time/time.c b/kernel/time/time.c
102310index 2c85b77..6530536 100644
102311--- a/kernel/time/time.c
102312+++ b/kernel/time/time.c
102313@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
102314 return error;
102315
102316 if (tz) {
102317+ /* we log in do_settimeofday called below, so don't log twice
102318+ */
102319+ if (!tv)
102320+ gr_log_timechange();
102321+
102322 sys_tz = *tz;
102323 update_vsyscall_tz();
102324 if (firsttime) {
102325diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
102326index 91db941..a371671 100644
102327--- a/kernel/time/timekeeping.c
102328+++ b/kernel/time/timekeeping.c
102329@@ -15,6 +15,7 @@
102330 #include <linux/init.h>
102331 #include <linux/mm.h>
102332 #include <linux/sched.h>
102333+#include <linux/grsecurity.h>
102334 #include <linux/syscore_ops.h>
102335 #include <linux/clocksource.h>
102336 #include <linux/jiffies.h>
102337@@ -802,6 +803,8 @@ int do_settimeofday64(const struct timespec64 *ts)
102338 if (!timespec64_valid_strict(ts))
102339 return -EINVAL;
102340
102341+ gr_log_timechange();
102342+
102343 raw_spin_lock_irqsave(&timekeeper_lock, flags);
102344 write_seqcount_begin(&tk_core.seq);
102345
102346diff --git a/kernel/time/timer.c b/kernel/time/timer.c
102347index 2d3f5c5..7ed7dc5 100644
102348--- a/kernel/time/timer.c
102349+++ b/kernel/time/timer.c
102350@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
102351 /*
102352 * This function runs timers and the timer-tq in bottom half context.
102353 */
102354-static void run_timer_softirq(struct softirq_action *h)
102355+static __latent_entropy void run_timer_softirq(void)
102356 {
102357 struct tvec_base *base = __this_cpu_read(tvec_bases);
102358
102359@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
102360 *
102361 * In all cases the return value is guaranteed to be non-negative.
102362 */
102363-signed long __sched schedule_timeout(signed long timeout)
102364+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
102365 {
102366 struct timer_list timer;
102367 unsigned long expire;
102368diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
102369index 61ed862..3b52c65 100644
102370--- a/kernel/time/timer_list.c
102371+++ b/kernel/time/timer_list.c
102372@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
102373
102374 static void print_name_offset(struct seq_file *m, void *sym)
102375 {
102376+#ifdef CONFIG_GRKERNSEC_HIDESYM
102377+ SEQ_printf(m, "<%p>", NULL);
102378+#else
102379 char symname[KSYM_NAME_LEN];
102380
102381 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
102382 SEQ_printf(m, "<%pK>", sym);
102383 else
102384 SEQ_printf(m, "%s", symname);
102385+#endif
102386 }
102387
102388 static void
102389@@ -119,7 +123,11 @@ next_one:
102390 static void
102391 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
102392 {
102393+#ifdef CONFIG_GRKERNSEC_HIDESYM
102394+ SEQ_printf(m, " .base: %p\n", NULL);
102395+#else
102396 SEQ_printf(m, " .base: %pK\n", base);
102397+#endif
102398 SEQ_printf(m, " .index: %d\n",
102399 base->index);
102400 SEQ_printf(m, " .resolution: %Lu nsecs\n",
102401@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
102402 {
102403 struct proc_dir_entry *pe;
102404
102405+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102406+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
102407+#else
102408 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
102409+#endif
102410 if (!pe)
102411 return -ENOMEM;
102412 return 0;
102413diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
102414index 1fb08f2..ca4bb1e 100644
102415--- a/kernel/time/timer_stats.c
102416+++ b/kernel/time/timer_stats.c
102417@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
102418 static unsigned long nr_entries;
102419 static struct entry entries[MAX_ENTRIES];
102420
102421-static atomic_t overflow_count;
102422+static atomic_unchecked_t overflow_count;
102423
102424 /*
102425 * The entries are in a hash-table, for fast lookup:
102426@@ -140,7 +140,7 @@ static void reset_entries(void)
102427 nr_entries = 0;
102428 memset(entries, 0, sizeof(entries));
102429 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
102430- atomic_set(&overflow_count, 0);
102431+ atomic_set_unchecked(&overflow_count, 0);
102432 }
102433
102434 static struct entry *alloc_entry(void)
102435@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
102436 if (likely(entry))
102437 entry->count++;
102438 else
102439- atomic_inc(&overflow_count);
102440+ atomic_inc_unchecked(&overflow_count);
102441
102442 out_unlock:
102443 raw_spin_unlock_irqrestore(lock, flags);
102444@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
102445
102446 static void print_name_offset(struct seq_file *m, unsigned long addr)
102447 {
102448+#ifdef CONFIG_GRKERNSEC_HIDESYM
102449+ seq_printf(m, "<%p>", NULL);
102450+#else
102451 char symname[KSYM_NAME_LEN];
102452
102453 if (lookup_symbol_name(addr, symname) < 0)
102454- seq_printf(m, "<%p>", (void *)addr);
102455+ seq_printf(m, "<%pK>", (void *)addr);
102456 else
102457 seq_printf(m, "%s", symname);
102458+#endif
102459 }
102460
102461 static int tstats_show(struct seq_file *m, void *v)
102462@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
102463
102464 seq_puts(m, "Timer Stats Version: v0.3\n");
102465 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
102466- if (atomic_read(&overflow_count))
102467- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
102468+ if (atomic_read_unchecked(&overflow_count))
102469+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
102470 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
102471
102472 for (i = 0; i < nr_entries; i++) {
102473@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
102474 {
102475 struct proc_dir_entry *pe;
102476
102477+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102478+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
102479+#else
102480 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
102481+#endif
102482 if (!pe)
102483 return -ENOMEM;
102484 return 0;
102485diff --git a/kernel/torture.c b/kernel/torture.c
102486index dd70993..0bf694b 100644
102487--- a/kernel/torture.c
102488+++ b/kernel/torture.c
102489@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
102490 mutex_lock(&fullstop_mutex);
102491 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
102492 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
102493- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
102494+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
102495 } else {
102496 pr_warn("Concurrent rmmod and shutdown illegal!\n");
102497 }
102498@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
102499 if (!torture_must_stop()) {
102500 if (stutter > 1) {
102501 schedule_timeout_interruptible(stutter - 1);
102502- ACCESS_ONCE(stutter_pause_test) = 2;
102503+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
102504 }
102505 schedule_timeout_interruptible(1);
102506- ACCESS_ONCE(stutter_pause_test) = 1;
102507+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
102508 }
102509 if (!torture_must_stop())
102510 schedule_timeout_interruptible(stutter);
102511- ACCESS_ONCE(stutter_pause_test) = 0;
102512+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
102513 torture_shutdown_absorb("torture_stutter");
102514 } while (!torture_must_stop());
102515 torture_kthread_stopping("torture_stutter");
102516@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
102517 schedule_timeout_uninterruptible(10);
102518 return true;
102519 }
102520- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
102521+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
102522 mutex_unlock(&fullstop_mutex);
102523 torture_shutdown_cleanup();
102524 torture_shuffle_cleanup();
102525diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
102526index 483cecf..ac46091 100644
102527--- a/kernel/trace/blktrace.c
102528+++ b/kernel/trace/blktrace.c
102529@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
102530 struct blk_trace *bt = filp->private_data;
102531 char buf[16];
102532
102533- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
102534+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
102535
102536 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
102537 }
102538@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
102539 return 1;
102540
102541 bt = buf->chan->private_data;
102542- atomic_inc(&bt->dropped);
102543+ atomic_inc_unchecked(&bt->dropped);
102544 return 0;
102545 }
102546
102547@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
102548
102549 bt->dir = dir;
102550 bt->dev = dev;
102551- atomic_set(&bt->dropped, 0);
102552+ atomic_set_unchecked(&bt->dropped, 0);
102553 INIT_LIST_HEAD(&bt->running_list);
102554
102555 ret = -EIO;
102556diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
102557index 4f22802..bd268b1 100644
102558--- a/kernel/trace/ftrace.c
102559+++ b/kernel/trace/ftrace.c
102560@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
102561 if (unlikely(ftrace_disabled))
102562 return 0;
102563
102564+ ret = ftrace_arch_code_modify_prepare();
102565+ FTRACE_WARN_ON(ret);
102566+ if (ret)
102567+ return 0;
102568+
102569 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
102570+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
102571 if (ret) {
102572 ftrace_bug(ret, rec);
102573- return 0;
102574 }
102575- return 1;
102576+ return ret ? 0 : 1;
102577 }
102578
102579 /*
102580@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
102581 if (!count)
102582 return 0;
102583
102584+ pax_open_kernel();
102585 sort(start, count, sizeof(*start),
102586 ftrace_cmp_ips, ftrace_swap_ips);
102587+ pax_close_kernel();
102588
102589 start_pg = ftrace_allocate_pages(count);
102590 if (!start_pg)
102591@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
102592
102593 if (t->ret_stack == NULL) {
102594 atomic_set(&t->tracing_graph_pause, 0);
102595- atomic_set(&t->trace_overrun, 0);
102596+ atomic_set_unchecked(&t->trace_overrun, 0);
102597 t->curr_ret_stack = -1;
102598 /* Make sure the tasks see the -1 first: */
102599 smp_wmb();
102600@@ -5876,7 +5883,7 @@ static void
102601 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
102602 {
102603 atomic_set(&t->tracing_graph_pause, 0);
102604- atomic_set(&t->trace_overrun, 0);
102605+ atomic_set_unchecked(&t->trace_overrun, 0);
102606 t->ftrace_timestamp = 0;
102607 /* make curr_ret_stack visible before we add the ret_stack */
102608 smp_wmb();
102609diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
102610index 922048a..bb71a55 100644
102611--- a/kernel/trace/ring_buffer.c
102612+++ b/kernel/trace/ring_buffer.c
102613@@ -348,9 +348,9 @@ struct buffer_data_page {
102614 */
102615 struct buffer_page {
102616 struct list_head list; /* list of buffer pages */
102617- local_t write; /* index for next write */
102618+ local_unchecked_t write; /* index for next write */
102619 unsigned read; /* index for next read */
102620- local_t entries; /* entries on this page */
102621+ local_unchecked_t entries; /* entries on this page */
102622 unsigned long real_end; /* real end of data */
102623 struct buffer_data_page *page; /* Actual data page */
102624 };
102625@@ -471,11 +471,11 @@ struct ring_buffer_per_cpu {
102626 unsigned long last_overrun;
102627 local_t entries_bytes;
102628 local_t entries;
102629- local_t overrun;
102630- local_t commit_overrun;
102631- local_t dropped_events;
102632+ local_unchecked_t overrun;
102633+ local_unchecked_t commit_overrun;
102634+ local_unchecked_t dropped_events;
102635 local_t committing;
102636- local_t commits;
102637+ local_unchecked_t commits;
102638 unsigned long read;
102639 unsigned long read_bytes;
102640 u64 write_stamp;
102641@@ -1045,8 +1045,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
102642 *
102643 * We add a counter to the write field to denote this.
102644 */
102645- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
102646- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
102647+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
102648+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
102649
102650 /*
102651 * Just make sure we have seen our old_write and synchronize
102652@@ -1074,8 +1074,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
102653 * cmpxchg to only update if an interrupt did not already
102654 * do it for us. If the cmpxchg fails, we don't care.
102655 */
102656- (void)local_cmpxchg(&next_page->write, old_write, val);
102657- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
102658+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
102659+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
102660
102661 /*
102662 * No need to worry about races with clearing out the commit.
102663@@ -1443,12 +1443,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
102664
102665 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
102666 {
102667- return local_read(&bpage->entries) & RB_WRITE_MASK;
102668+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
102669 }
102670
102671 static inline unsigned long rb_page_write(struct buffer_page *bpage)
102672 {
102673- return local_read(&bpage->write) & RB_WRITE_MASK;
102674+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
102675 }
102676
102677 static int
102678@@ -1543,7 +1543,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
102679 * bytes consumed in ring buffer from here.
102680 * Increment overrun to account for the lost events.
102681 */
102682- local_add(page_entries, &cpu_buffer->overrun);
102683+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
102684 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
102685 }
102686
102687@@ -2105,7 +2105,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
102688 * it is our responsibility to update
102689 * the counters.
102690 */
102691- local_add(entries, &cpu_buffer->overrun);
102692+ local_add_unchecked(entries, &cpu_buffer->overrun);
102693 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
102694
102695 /*
102696@@ -2255,7 +2255,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
102697 if (tail == BUF_PAGE_SIZE)
102698 tail_page->real_end = 0;
102699
102700- local_sub(length, &tail_page->write);
102701+ local_sub_unchecked(length, &tail_page->write);
102702 return;
102703 }
102704
102705@@ -2290,7 +2290,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
102706 rb_event_set_padding(event);
102707
102708 /* Set the write back to the previous setting */
102709- local_sub(length, &tail_page->write);
102710+ local_sub_unchecked(length, &tail_page->write);
102711 return;
102712 }
102713
102714@@ -2302,7 +2302,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
102715
102716 /* Set write to end of buffer */
102717 length = (tail + length) - BUF_PAGE_SIZE;
102718- local_sub(length, &tail_page->write);
102719+ local_sub_unchecked(length, &tail_page->write);
102720 }
102721
102722 /*
102723@@ -2328,7 +2328,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
102724 * about it.
102725 */
102726 if (unlikely(next_page == commit_page)) {
102727- local_inc(&cpu_buffer->commit_overrun);
102728+ local_inc_unchecked(&cpu_buffer->commit_overrun);
102729 goto out_reset;
102730 }
102731
102732@@ -2358,7 +2358,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
102733 * this is easy, just stop here.
102734 */
102735 if (!(buffer->flags & RB_FL_OVERWRITE)) {
102736- local_inc(&cpu_buffer->dropped_events);
102737+ local_inc_unchecked(&cpu_buffer->dropped_events);
102738 goto out_reset;
102739 }
102740
102741@@ -2384,7 +2384,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
102742 cpu_buffer->tail_page) &&
102743 (cpu_buffer->commit_page ==
102744 cpu_buffer->reader_page))) {
102745- local_inc(&cpu_buffer->commit_overrun);
102746+ local_inc_unchecked(&cpu_buffer->commit_overrun);
102747 goto out_reset;
102748 }
102749 }
102750@@ -2432,7 +2432,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
102751 length += RB_LEN_TIME_EXTEND;
102752
102753 tail_page = cpu_buffer->tail_page;
102754- write = local_add_return(length, &tail_page->write);
102755+ write = local_add_return_unchecked(length, &tail_page->write);
102756
102757 /* set write to only the index of the write */
102758 write &= RB_WRITE_MASK;
102759@@ -2456,7 +2456,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
102760 kmemcheck_annotate_bitfield(event, bitfield);
102761 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
102762
102763- local_inc(&tail_page->entries);
102764+ local_inc_unchecked(&tail_page->entries);
102765
102766 /*
102767 * If this is the first commit on the page, then update
102768@@ -2489,7 +2489,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
102769
102770 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
102771 unsigned long write_mask =
102772- local_read(&bpage->write) & ~RB_WRITE_MASK;
102773+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
102774 unsigned long event_length = rb_event_length(event);
102775 /*
102776 * This is on the tail page. It is possible that
102777@@ -2499,7 +2499,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
102778 */
102779 old_index += write_mask;
102780 new_index += write_mask;
102781- index = local_cmpxchg(&bpage->write, old_index, new_index);
102782+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
102783 if (index == old_index) {
102784 /* update counters */
102785 local_sub(event_length, &cpu_buffer->entries_bytes);
102786@@ -2514,7 +2514,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
102787 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
102788 {
102789 local_inc(&cpu_buffer->committing);
102790- local_inc(&cpu_buffer->commits);
102791+ local_inc_unchecked(&cpu_buffer->commits);
102792 }
102793
102794 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
102795@@ -2526,7 +2526,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
102796 return;
102797
102798 again:
102799- commits = local_read(&cpu_buffer->commits);
102800+ commits = local_read_unchecked(&cpu_buffer->commits);
102801 /* synchronize with interrupts */
102802 barrier();
102803 if (local_read(&cpu_buffer->committing) == 1)
102804@@ -2542,7 +2542,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
102805 * updating of the commit page and the clearing of the
102806 * committing counter.
102807 */
102808- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
102809+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
102810 !local_read(&cpu_buffer->committing)) {
102811 local_inc(&cpu_buffer->committing);
102812 goto again;
102813@@ -2572,7 +2572,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
102814 barrier();
102815 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
102816 local_dec(&cpu_buffer->committing);
102817- local_dec(&cpu_buffer->commits);
102818+ local_dec_unchecked(&cpu_buffer->commits);
102819 return NULL;
102820 }
102821 #endif
102822@@ -2901,7 +2901,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
102823
102824 /* Do the likely case first */
102825 if (likely(bpage->page == (void *)addr)) {
102826- local_dec(&bpage->entries);
102827+ local_dec_unchecked(&bpage->entries);
102828 return;
102829 }
102830
102831@@ -2913,7 +2913,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
102832 start = bpage;
102833 do {
102834 if (bpage->page == (void *)addr) {
102835- local_dec(&bpage->entries);
102836+ local_dec_unchecked(&bpage->entries);
102837 return;
102838 }
102839 rb_inc_page(cpu_buffer, &bpage);
102840@@ -3197,7 +3197,7 @@ static inline unsigned long
102841 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
102842 {
102843 return local_read(&cpu_buffer->entries) -
102844- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
102845+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
102846 }
102847
102848 /**
102849@@ -3286,7 +3286,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
102850 return 0;
102851
102852 cpu_buffer = buffer->buffers[cpu];
102853- ret = local_read(&cpu_buffer->overrun);
102854+ ret = local_read_unchecked(&cpu_buffer->overrun);
102855
102856 return ret;
102857 }
102858@@ -3309,7 +3309,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
102859 return 0;
102860
102861 cpu_buffer = buffer->buffers[cpu];
102862- ret = local_read(&cpu_buffer->commit_overrun);
102863+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
102864
102865 return ret;
102866 }
102867@@ -3331,7 +3331,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
102868 return 0;
102869
102870 cpu_buffer = buffer->buffers[cpu];
102871- ret = local_read(&cpu_buffer->dropped_events);
102872+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
102873
102874 return ret;
102875 }
102876@@ -3394,7 +3394,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
102877 /* if you care about this being correct, lock the buffer */
102878 for_each_buffer_cpu(buffer, cpu) {
102879 cpu_buffer = buffer->buffers[cpu];
102880- overruns += local_read(&cpu_buffer->overrun);
102881+ overruns += local_read_unchecked(&cpu_buffer->overrun);
102882 }
102883
102884 return overruns;
102885@@ -3565,8 +3565,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
102886 /*
102887 * Reset the reader page to size zero.
102888 */
102889- local_set(&cpu_buffer->reader_page->write, 0);
102890- local_set(&cpu_buffer->reader_page->entries, 0);
102891+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
102892+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
102893 local_set(&cpu_buffer->reader_page->page->commit, 0);
102894 cpu_buffer->reader_page->real_end = 0;
102895
102896@@ -3600,7 +3600,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
102897 * want to compare with the last_overrun.
102898 */
102899 smp_mb();
102900- overwrite = local_read(&(cpu_buffer->overrun));
102901+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
102902
102903 /*
102904 * Here's the tricky part.
102905@@ -4172,8 +4172,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
102906
102907 cpu_buffer->head_page
102908 = list_entry(cpu_buffer->pages, struct buffer_page, list);
102909- local_set(&cpu_buffer->head_page->write, 0);
102910- local_set(&cpu_buffer->head_page->entries, 0);
102911+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
102912+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
102913 local_set(&cpu_buffer->head_page->page->commit, 0);
102914
102915 cpu_buffer->head_page->read = 0;
102916@@ -4183,18 +4183,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
102917
102918 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
102919 INIT_LIST_HEAD(&cpu_buffer->new_pages);
102920- local_set(&cpu_buffer->reader_page->write, 0);
102921- local_set(&cpu_buffer->reader_page->entries, 0);
102922+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
102923+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
102924 local_set(&cpu_buffer->reader_page->page->commit, 0);
102925 cpu_buffer->reader_page->read = 0;
102926
102927 local_set(&cpu_buffer->entries_bytes, 0);
102928- local_set(&cpu_buffer->overrun, 0);
102929- local_set(&cpu_buffer->commit_overrun, 0);
102930- local_set(&cpu_buffer->dropped_events, 0);
102931+ local_set_unchecked(&cpu_buffer->overrun, 0);
102932+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
102933+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
102934 local_set(&cpu_buffer->entries, 0);
102935 local_set(&cpu_buffer->committing, 0);
102936- local_set(&cpu_buffer->commits, 0);
102937+ local_set_unchecked(&cpu_buffer->commits, 0);
102938 cpu_buffer->read = 0;
102939 cpu_buffer->read_bytes = 0;
102940
102941@@ -4595,8 +4595,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
102942 rb_init_page(bpage);
102943 bpage = reader->page;
102944 reader->page = *data_page;
102945- local_set(&reader->write, 0);
102946- local_set(&reader->entries, 0);
102947+ local_set_unchecked(&reader->write, 0);
102948+ local_set_unchecked(&reader->entries, 0);
102949 reader->read = 0;
102950 *data_page = bpage;
102951
102952diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
102953index 62c6506..5c25989 100644
102954--- a/kernel/trace/trace.c
102955+++ b/kernel/trace/trace.c
102956@@ -3500,7 +3500,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
102957 return 0;
102958 }
102959
102960-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
102961+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
102962 {
102963 /* do nothing if flag is already set */
102964 if (!!(trace_flags & mask) == !!enabled)
102965diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
102966index dd8205a..1aae87a 100644
102967--- a/kernel/trace/trace.h
102968+++ b/kernel/trace/trace.h
102969@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
102970 void trace_printk_init_buffers(void);
102971 void trace_printk_start_comm(void);
102972 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
102973-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
102974+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
102975
102976 /*
102977 * Normal trace_printk() and friends allocates special buffers
102978diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
102979index 57b67b1..66082a9 100644
102980--- a/kernel/trace/trace_clock.c
102981+++ b/kernel/trace/trace_clock.c
102982@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
102983 return now;
102984 }
102985
102986-static atomic64_t trace_counter;
102987+static atomic64_unchecked_t trace_counter;
102988
102989 /*
102990 * trace_clock_counter(): simply an atomic counter.
102991@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
102992 */
102993 u64 notrace trace_clock_counter(void)
102994 {
102995- return atomic64_add_return(1, &trace_counter);
102996+ return atomic64_inc_return_unchecked(&trace_counter);
102997 }
102998diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
102999index a9c10a3..1864f6b 100644
103000--- a/kernel/trace/trace_events.c
103001+++ b/kernel/trace/trace_events.c
103002@@ -1762,7 +1762,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
103003 return 0;
103004 }
103005
103006-struct ftrace_module_file_ops;
103007 static void __add_event_to_tracers(struct ftrace_event_call *call);
103008
103009 /* Add an additional event_call dynamically */
103010diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
103011index b6fce36..d9f11a3 100644
103012--- a/kernel/trace/trace_functions_graph.c
103013+++ b/kernel/trace/trace_functions_graph.c
103014@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
103015
103016 /* The return trace stack is full */
103017 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
103018- atomic_inc(&current->trace_overrun);
103019+ atomic_inc_unchecked(&current->trace_overrun);
103020 return -EBUSY;
103021 }
103022
103023@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
103024 *ret = current->ret_stack[index].ret;
103025 trace->func = current->ret_stack[index].func;
103026 trace->calltime = current->ret_stack[index].calltime;
103027- trace->overrun = atomic_read(&current->trace_overrun);
103028+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
103029 trace->depth = index;
103030 }
103031
103032diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
103033index 7a9ba62..2e0e4a1 100644
103034--- a/kernel/trace/trace_mmiotrace.c
103035+++ b/kernel/trace/trace_mmiotrace.c
103036@@ -24,7 +24,7 @@ struct header_iter {
103037 static struct trace_array *mmio_trace_array;
103038 static bool overrun_detected;
103039 static unsigned long prev_overruns;
103040-static atomic_t dropped_count;
103041+static atomic_unchecked_t dropped_count;
103042
103043 static void mmio_reset_data(struct trace_array *tr)
103044 {
103045@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
103046
103047 static unsigned long count_overruns(struct trace_iterator *iter)
103048 {
103049- unsigned long cnt = atomic_xchg(&dropped_count, 0);
103050+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
103051 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
103052
103053 if (over > prev_overruns)
103054@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
103055 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
103056 sizeof(*entry), 0, pc);
103057 if (!event) {
103058- atomic_inc(&dropped_count);
103059+ atomic_inc_unchecked(&dropped_count);
103060 return;
103061 }
103062 entry = ring_buffer_event_data(event);
103063@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
103064 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
103065 sizeof(*entry), 0, pc);
103066 if (!event) {
103067- atomic_inc(&dropped_count);
103068+ atomic_inc_unchecked(&dropped_count);
103069 return;
103070 }
103071 entry = ring_buffer_event_data(event);
103072diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
103073index 692bf71..6d9a9cd 100644
103074--- a/kernel/trace/trace_output.c
103075+++ b/kernel/trace/trace_output.c
103076@@ -751,14 +751,16 @@ int register_ftrace_event(struct trace_event *event)
103077 goto out;
103078 }
103079
103080+ pax_open_kernel();
103081 if (event->funcs->trace == NULL)
103082- event->funcs->trace = trace_nop_print;
103083+ *(void **)&event->funcs->trace = trace_nop_print;
103084 if (event->funcs->raw == NULL)
103085- event->funcs->raw = trace_nop_print;
103086+ *(void **)&event->funcs->raw = trace_nop_print;
103087 if (event->funcs->hex == NULL)
103088- event->funcs->hex = trace_nop_print;
103089+ *(void **)&event->funcs->hex = trace_nop_print;
103090 if (event->funcs->binary == NULL)
103091- event->funcs->binary = trace_nop_print;
103092+ *(void **)&event->funcs->binary = trace_nop_print;
103093+ pax_close_kernel();
103094
103095 key = event->type & (EVENT_HASHSIZE - 1);
103096
103097diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
103098index e694c9f..6775a38 100644
103099--- a/kernel/trace/trace_seq.c
103100+++ b/kernel/trace/trace_seq.c
103101@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
103102 return 0;
103103 }
103104
103105- seq_buf_path(&s->seq, path, "\n");
103106+ seq_buf_path(&s->seq, path, "\n\\");
103107
103108 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
103109 s->seq.len = save_len;
103110diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
103111index c3e4fcf..ef6cc43 100644
103112--- a/kernel/trace/trace_stack.c
103113+++ b/kernel/trace/trace_stack.c
103114@@ -88,7 +88,7 @@ check_stack(unsigned long ip, unsigned long *stack)
103115 return;
103116
103117 /* we do not handle interrupt stacks yet */
103118- if (!object_is_on_stack(stack))
103119+ if (!object_starts_on_stack(stack))
103120 return;
103121
103122 local_irq_save(flags);
103123diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
103124index f97f6e3..d367b48 100644
103125--- a/kernel/trace/trace_syscalls.c
103126+++ b/kernel/trace/trace_syscalls.c
103127@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
103128 int num;
103129
103130 num = ((struct syscall_metadata *)call->data)->syscall_nr;
103131+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
103132+ return -EINVAL;
103133
103134 mutex_lock(&syscall_trace_lock);
103135 if (!sys_perf_refcount_enter)
103136@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
103137 int num;
103138
103139 num = ((struct syscall_metadata *)call->data)->syscall_nr;
103140+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
103141+ return;
103142
103143 mutex_lock(&syscall_trace_lock);
103144 sys_perf_refcount_enter--;
103145@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
103146 int num;
103147
103148 num = ((struct syscall_metadata *)call->data)->syscall_nr;
103149+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
103150+ return -EINVAL;
103151
103152 mutex_lock(&syscall_trace_lock);
103153 if (!sys_perf_refcount_exit)
103154@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
103155 int num;
103156
103157 num = ((struct syscall_metadata *)call->data)->syscall_nr;
103158+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
103159+ return;
103160
103161 mutex_lock(&syscall_trace_lock);
103162 sys_perf_refcount_exit--;
103163diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
103164index 4109f83..fe1f830 100644
103165--- a/kernel/user_namespace.c
103166+++ b/kernel/user_namespace.c
103167@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
103168 !kgid_has_mapping(parent_ns, group))
103169 return -EPERM;
103170
103171+#ifdef CONFIG_GRKERNSEC
103172+ /*
103173+ * This doesn't really inspire confidence:
103174+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
103175+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
103176+ * Increases kernel attack surface in areas developers
103177+ * previously cared little about ("low importance due
103178+ * to requiring "root" capability")
103179+ * To be removed when this code receives *proper* review
103180+ */
103181+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
103182+ !capable(CAP_SETGID))
103183+ return -EPERM;
103184+#endif
103185+
103186 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
103187 if (!ns)
103188 return -ENOMEM;
103189@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
103190 if (atomic_read(&current->mm->mm_users) > 1)
103191 return -EINVAL;
103192
103193- if (current->fs->users != 1)
103194+ if (atomic_read(&current->fs->users) != 1)
103195 return -EINVAL;
103196
103197 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
103198diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
103199index c8eac43..4b5f08f 100644
103200--- a/kernel/utsname_sysctl.c
103201+++ b/kernel/utsname_sysctl.c
103202@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
103203 static int proc_do_uts_string(struct ctl_table *table, int write,
103204 void __user *buffer, size_t *lenp, loff_t *ppos)
103205 {
103206- struct ctl_table uts_table;
103207+ ctl_table_no_const uts_table;
103208 int r;
103209 memcpy(&uts_table, table, sizeof(uts_table));
103210 uts_table.data = get_uts(table, write);
103211diff --git a/kernel/watchdog.c b/kernel/watchdog.c
103212index 3174bf8..3553520 100644
103213--- a/kernel/watchdog.c
103214+++ b/kernel/watchdog.c
103215@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
103216 static void watchdog_nmi_disable(unsigned int cpu) { return; }
103217 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
103218
103219-static struct smp_hotplug_thread watchdog_threads = {
103220+static struct smp_hotplug_thread watchdog_threads __read_only = {
103221 .store = &softlockup_watchdog,
103222 .thread_should_run = watchdog_should_run,
103223 .thread_fn = watchdog,
103224diff --git a/kernel/workqueue.c b/kernel/workqueue.c
103225index 41ff75b..5ad683a 100644
103226--- a/kernel/workqueue.c
103227+++ b/kernel/workqueue.c
103228@@ -4564,7 +4564,7 @@ static void rebind_workers(struct worker_pool *pool)
103229 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
103230 worker_flags |= WORKER_REBOUND;
103231 worker_flags &= ~WORKER_UNBOUND;
103232- ACCESS_ONCE(worker->flags) = worker_flags;
103233+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
103234 }
103235
103236 spin_unlock_irq(&pool->lock);
103237diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
103238index c5cefb3..a4241e3 100644
103239--- a/lib/Kconfig.debug
103240+++ b/lib/Kconfig.debug
103241@@ -923,7 +923,7 @@ config DEBUG_MUTEXES
103242
103243 config DEBUG_WW_MUTEX_SLOWPATH
103244 bool "Wait/wound mutex debugging: Slowpath testing"
103245- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
103246+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
103247 select DEBUG_LOCK_ALLOC
103248 select DEBUG_SPINLOCK
103249 select DEBUG_MUTEXES
103250@@ -940,7 +940,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
103251
103252 config DEBUG_LOCK_ALLOC
103253 bool "Lock debugging: detect incorrect freeing of live locks"
103254- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
103255+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
103256 select DEBUG_SPINLOCK
103257 select DEBUG_MUTEXES
103258 select LOCKDEP
103259@@ -954,7 +954,7 @@ config DEBUG_LOCK_ALLOC
103260
103261 config PROVE_LOCKING
103262 bool "Lock debugging: prove locking correctness"
103263- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
103264+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
103265 select LOCKDEP
103266 select DEBUG_SPINLOCK
103267 select DEBUG_MUTEXES
103268@@ -1005,7 +1005,7 @@ config LOCKDEP
103269
103270 config LOCK_STAT
103271 bool "Lock usage statistics"
103272- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
103273+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
103274 select LOCKDEP
103275 select DEBUG_SPINLOCK
103276 select DEBUG_MUTEXES
103277@@ -1467,6 +1467,7 @@ config LATENCYTOP
103278 depends on DEBUG_KERNEL
103279 depends on STACKTRACE_SUPPORT
103280 depends on PROC_FS
103281+ depends on !GRKERNSEC_HIDESYM
103282 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
103283 select KALLSYMS
103284 select KALLSYMS_ALL
103285@@ -1483,7 +1484,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
103286 config DEBUG_STRICT_USER_COPY_CHECKS
103287 bool "Strict user copy size checks"
103288 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
103289- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
103290+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
103291 help
103292 Enabling this option turns a certain set of sanity checks for user
103293 copy operations into compile time failures.
103294@@ -1614,7 +1615,7 @@ endmenu # runtime tests
103295
103296 config PROVIDE_OHCI1394_DMA_INIT
103297 bool "Remote debugging over FireWire early on boot"
103298- depends on PCI && X86
103299+ depends on PCI && X86 && !GRKERNSEC
103300 help
103301 If you want to debug problems which hang or crash the kernel early
103302 on boot and the crashing machine has a FireWire port, you can use
103303diff --git a/lib/Makefile b/lib/Makefile
103304index 58f74d2..08e011f 100644
103305--- a/lib/Makefile
103306+++ b/lib/Makefile
103307@@ -59,7 +59,7 @@ obj-$(CONFIG_BTREE) += btree.o
103308 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
103309 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
103310 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
103311-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
103312+obj-y += list_debug.o
103313 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
103314
103315 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
103316diff --git a/lib/average.c b/lib/average.c
103317index 114d1be..ab0350c 100644
103318--- a/lib/average.c
103319+++ b/lib/average.c
103320@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
103321 {
103322 unsigned long internal = ACCESS_ONCE(avg->internal);
103323
103324- ACCESS_ONCE(avg->internal) = internal ?
103325+ ACCESS_ONCE_RW(avg->internal) = internal ?
103326 (((internal << avg->weight) - internal) +
103327 (val << avg->factor)) >> avg->weight :
103328 (val << avg->factor);
103329diff --git a/lib/bitmap.c b/lib/bitmap.c
103330index d456f4c1..29a0308 100644
103331--- a/lib/bitmap.c
103332+++ b/lib/bitmap.c
103333@@ -264,7 +264,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
103334 }
103335 EXPORT_SYMBOL(__bitmap_subset);
103336
103337-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
103338+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
103339 {
103340 unsigned int k, lim = bits/BITS_PER_LONG;
103341 int w = 0;
103342@@ -391,7 +391,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
103343 {
103344 int c, old_c, totaldigits, ndigits, nchunks, nbits;
103345 u32 chunk;
103346- const char __user __force *ubuf = (const char __user __force *)buf;
103347+ const char __user *ubuf = (const char __force_user *)buf;
103348
103349 bitmap_zero(maskp, nmaskbits);
103350
103351@@ -476,7 +476,7 @@ int bitmap_parse_user(const char __user *ubuf,
103352 {
103353 if (!access_ok(VERIFY_READ, ubuf, ulen))
103354 return -EFAULT;
103355- return __bitmap_parse((const char __force *)ubuf,
103356+ return __bitmap_parse((const char __force_kernel *)ubuf,
103357 ulen, 1, maskp, nmaskbits);
103358
103359 }
103360@@ -535,7 +535,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
103361 {
103362 unsigned a, b;
103363 int c, old_c, totaldigits;
103364- const char __user __force *ubuf = (const char __user __force *)buf;
103365+ const char __user *ubuf = (const char __force_user *)buf;
103366 int exp_digit, in_range;
103367
103368 totaldigits = c = 0;
103369@@ -630,7 +630,7 @@ int bitmap_parselist_user(const char __user *ubuf,
103370 {
103371 if (!access_ok(VERIFY_READ, ubuf, ulen))
103372 return -EFAULT;
103373- return __bitmap_parselist((const char __force *)ubuf,
103374+ return __bitmap_parselist((const char __force_kernel *)ubuf,
103375 ulen, 1, maskp, nmaskbits);
103376 }
103377 EXPORT_SYMBOL(bitmap_parselist_user);
103378diff --git a/lib/bug.c b/lib/bug.c
103379index 0c3bd95..5a615a1 100644
103380--- a/lib/bug.c
103381+++ b/lib/bug.c
103382@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
103383 return BUG_TRAP_TYPE_NONE;
103384
103385 bug = find_bug(bugaddr);
103386+ if (!bug)
103387+ return BUG_TRAP_TYPE_NONE;
103388
103389 file = NULL;
103390 line = 0;
103391diff --git a/lib/debugobjects.c b/lib/debugobjects.c
103392index 547f7f9..a6d4ba0 100644
103393--- a/lib/debugobjects.c
103394+++ b/lib/debugobjects.c
103395@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
103396 if (limit > 4)
103397 return;
103398
103399- is_on_stack = object_is_on_stack(addr);
103400+ is_on_stack = object_starts_on_stack(addr);
103401 if (is_on_stack == onstack)
103402 return;
103403
103404diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
103405index 6dd0335..1e9c239 100644
103406--- a/lib/decompress_bunzip2.c
103407+++ b/lib/decompress_bunzip2.c
103408@@ -665,7 +665,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
103409
103410 /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
103411 uncompressed data. Allocate intermediate buffer for block. */
103412- bd->dbufSize = 100000*(i-BZh0);
103413+ i -= BZh0;
103414+ bd->dbufSize = 100000 * i;
103415
103416 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
103417 if (!bd->dbuf)
103418diff --git a/lib/div64.c b/lib/div64.c
103419index 4382ad7..08aa558 100644
103420--- a/lib/div64.c
103421+++ b/lib/div64.c
103422@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
103423 EXPORT_SYMBOL(__div64_32);
103424
103425 #ifndef div_s64_rem
103426-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
103427+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
103428 {
103429 u64 quotient;
103430
103431@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
103432 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
103433 */
103434 #ifndef div64_u64
103435-u64 div64_u64(u64 dividend, u64 divisor)
103436+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
103437 {
103438 u32 high = divisor >> 32;
103439 u64 quot;
103440diff --git a/lib/dma-debug.c b/lib/dma-debug.c
103441index 9722bd2..0d826f4 100644
103442--- a/lib/dma-debug.c
103443+++ b/lib/dma-debug.c
103444@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
103445
103446 void dma_debug_add_bus(struct bus_type *bus)
103447 {
103448- struct notifier_block *nb;
103449+ notifier_block_no_const *nb;
103450
103451 if (dma_debug_disabled())
103452 return;
103453@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
103454
103455 static void check_for_stack(struct device *dev, void *addr)
103456 {
103457- if (object_is_on_stack(addr))
103458+ if (object_starts_on_stack(addr))
103459 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
103460 "stack [addr=%p]\n", addr);
103461 }
103462diff --git a/lib/inflate.c b/lib/inflate.c
103463index 013a761..c28f3fc 100644
103464--- a/lib/inflate.c
103465+++ b/lib/inflate.c
103466@@ -269,7 +269,7 @@ static void free(void *where)
103467 malloc_ptr = free_mem_ptr;
103468 }
103469 #else
103470-#define malloc(a) kmalloc(a, GFP_KERNEL)
103471+#define malloc(a) kmalloc((a), GFP_KERNEL)
103472 #define free(a) kfree(a)
103473 #endif
103474
103475diff --git a/lib/ioremap.c b/lib/ioremap.c
103476index 0c9216c..863bd89 100644
103477--- a/lib/ioremap.c
103478+++ b/lib/ioremap.c
103479@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
103480 unsigned long next;
103481
103482 phys_addr -= addr;
103483- pmd = pmd_alloc(&init_mm, pud, addr);
103484+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
103485 if (!pmd)
103486 return -ENOMEM;
103487 do {
103488@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
103489 unsigned long next;
103490
103491 phys_addr -= addr;
103492- pud = pud_alloc(&init_mm, pgd, addr);
103493+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
103494 if (!pud)
103495 return -ENOMEM;
103496 do {
103497diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
103498index bd2bea9..6b3c95e 100644
103499--- a/lib/is_single_threaded.c
103500+++ b/lib/is_single_threaded.c
103501@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
103502 struct task_struct *p, *t;
103503 bool ret;
103504
103505+ if (!mm)
103506+ return true;
103507+
103508 if (atomic_read(&task->signal->live) != 1)
103509 return false;
103510
103511diff --git a/lib/kobject.c b/lib/kobject.c
103512index 03d4ab3..46f6374 100644
103513--- a/lib/kobject.c
103514+++ b/lib/kobject.c
103515@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
103516
103517
103518 static DEFINE_SPINLOCK(kobj_ns_type_lock);
103519-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
103520+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
103521
103522-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
103523+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
103524 {
103525 enum kobj_ns_type type = ops->type;
103526 int error;
103527diff --git a/lib/list_debug.c b/lib/list_debug.c
103528index c24c2f7..f0296f4 100644
103529--- a/lib/list_debug.c
103530+++ b/lib/list_debug.c
103531@@ -11,7 +11,9 @@
103532 #include <linux/bug.h>
103533 #include <linux/kernel.h>
103534 #include <linux/rculist.h>
103535+#include <linux/mm.h>
103536
103537+#ifdef CONFIG_DEBUG_LIST
103538 /*
103539 * Insert a new entry between two known consecutive entries.
103540 *
103541@@ -19,21 +21,40 @@
103542 * the prev/next entries already!
103543 */
103544
103545+static bool __list_add_debug(struct list_head *new,
103546+ struct list_head *prev,
103547+ struct list_head *next)
103548+{
103549+ if (unlikely(next->prev != prev)) {
103550+ printk(KERN_ERR "list_add corruption. next->prev should be "
103551+ "prev (%p), but was %p. (next=%p).\n",
103552+ prev, next->prev, next);
103553+ BUG();
103554+ return false;
103555+ }
103556+ if (unlikely(prev->next != next)) {
103557+ printk(KERN_ERR "list_add corruption. prev->next should be "
103558+ "next (%p), but was %p. (prev=%p).\n",
103559+ next, prev->next, prev);
103560+ BUG();
103561+ return false;
103562+ }
103563+ if (unlikely(new == prev || new == next)) {
103564+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
103565+ new, prev, next);
103566+ BUG();
103567+ return false;
103568+ }
103569+ return true;
103570+}
103571+
103572 void __list_add(struct list_head *new,
103573- struct list_head *prev,
103574- struct list_head *next)
103575+ struct list_head *prev,
103576+ struct list_head *next)
103577 {
103578- WARN(next->prev != prev,
103579- "list_add corruption. next->prev should be "
103580- "prev (%p), but was %p. (next=%p).\n",
103581- prev, next->prev, next);
103582- WARN(prev->next != next,
103583- "list_add corruption. prev->next should be "
103584- "next (%p), but was %p. (prev=%p).\n",
103585- next, prev->next, prev);
103586- WARN(new == prev || new == next,
103587- "list_add double add: new=%p, prev=%p, next=%p.\n",
103588- new, prev, next);
103589+ if (!__list_add_debug(new, prev, next))
103590+ return;
103591+
103592 next->prev = new;
103593 new->next = next;
103594 new->prev = prev;
103595@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
103596 }
103597 EXPORT_SYMBOL(__list_add);
103598
103599-void __list_del_entry(struct list_head *entry)
103600+static bool __list_del_entry_debug(struct list_head *entry)
103601 {
103602 struct list_head *prev, *next;
103603
103604 prev = entry->prev;
103605 next = entry->next;
103606
103607- if (WARN(next == LIST_POISON1,
103608- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
103609- entry, LIST_POISON1) ||
103610- WARN(prev == LIST_POISON2,
103611- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
103612- entry, LIST_POISON2) ||
103613- WARN(prev->next != entry,
103614- "list_del corruption. prev->next should be %p, "
103615- "but was %p\n", entry, prev->next) ||
103616- WARN(next->prev != entry,
103617- "list_del corruption. next->prev should be %p, "
103618- "but was %p\n", entry, next->prev))
103619+ if (unlikely(next == LIST_POISON1)) {
103620+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
103621+ entry, LIST_POISON1);
103622+ BUG();
103623+ return false;
103624+ }
103625+ if (unlikely(prev == LIST_POISON2)) {
103626+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
103627+ entry, LIST_POISON2);
103628+ BUG();
103629+ return false;
103630+ }
103631+ if (unlikely(entry->prev->next != entry)) {
103632+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
103633+ "but was %p\n", entry, prev->next);
103634+ BUG();
103635+ return false;
103636+ }
103637+ if (unlikely(entry->next->prev != entry)) {
103638+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
103639+ "but was %p\n", entry, next->prev);
103640+ BUG();
103641+ return false;
103642+ }
103643+ return true;
103644+}
103645+
103646+void __list_del_entry(struct list_head *entry)
103647+{
103648+ if (!__list_del_entry_debug(entry))
103649 return;
103650
103651- __list_del(prev, next);
103652+ __list_del(entry->prev, entry->next);
103653 }
103654 EXPORT_SYMBOL(__list_del_entry);
103655
103656@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
103657 void __list_add_rcu(struct list_head *new,
103658 struct list_head *prev, struct list_head *next)
103659 {
103660- WARN(next->prev != prev,
103661- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
103662- prev, next->prev, next);
103663- WARN(prev->next != next,
103664- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
103665- next, prev->next, prev);
103666+ if (!__list_add_debug(new, prev, next))
103667+ return;
103668+
103669 new->next = next;
103670 new->prev = prev;
103671 rcu_assign_pointer(list_next_rcu(prev), new);
103672 next->prev = new;
103673 }
103674 EXPORT_SYMBOL(__list_add_rcu);
103675+#endif
103676+
103677+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
103678+{
103679+#ifdef CONFIG_DEBUG_LIST
103680+ if (!__list_add_debug(new, prev, next))
103681+ return;
103682+#endif
103683+
103684+ pax_open_kernel();
103685+ next->prev = new;
103686+ new->next = next;
103687+ new->prev = prev;
103688+ prev->next = new;
103689+ pax_close_kernel();
103690+}
103691+EXPORT_SYMBOL(__pax_list_add);
103692+
103693+void pax_list_del(struct list_head *entry)
103694+{
103695+#ifdef CONFIG_DEBUG_LIST
103696+ if (!__list_del_entry_debug(entry))
103697+ return;
103698+#endif
103699+
103700+ pax_open_kernel();
103701+ __list_del(entry->prev, entry->next);
103702+ entry->next = LIST_POISON1;
103703+ entry->prev = LIST_POISON2;
103704+ pax_close_kernel();
103705+}
103706+EXPORT_SYMBOL(pax_list_del);
103707+
103708+void pax_list_del_init(struct list_head *entry)
103709+{
103710+ pax_open_kernel();
103711+ __list_del(entry->prev, entry->next);
103712+ INIT_LIST_HEAD(entry);
103713+ pax_close_kernel();
103714+}
103715+EXPORT_SYMBOL(pax_list_del_init);
103716+
103717+void __pax_list_add_rcu(struct list_head *new,
103718+ struct list_head *prev, struct list_head *next)
103719+{
103720+#ifdef CONFIG_DEBUG_LIST
103721+ if (!__list_add_debug(new, prev, next))
103722+ return;
103723+#endif
103724+
103725+ pax_open_kernel();
103726+ new->next = next;
103727+ new->prev = prev;
103728+ rcu_assign_pointer(list_next_rcu(prev), new);
103729+ next->prev = new;
103730+ pax_close_kernel();
103731+}
103732+EXPORT_SYMBOL(__pax_list_add_rcu);
103733+
103734+void pax_list_del_rcu(struct list_head *entry)
103735+{
103736+#ifdef CONFIG_DEBUG_LIST
103737+ if (!__list_del_entry_debug(entry))
103738+ return;
103739+#endif
103740+
103741+ pax_open_kernel();
103742+ __list_del(entry->prev, entry->next);
103743+ entry->next = LIST_POISON1;
103744+ entry->prev = LIST_POISON2;
103745+ pax_close_kernel();
103746+}
103747+EXPORT_SYMBOL(pax_list_del_rcu);
103748diff --git a/lib/lockref.c b/lib/lockref.c
103749index ecb9a66..a044fc5 100644
103750--- a/lib/lockref.c
103751+++ b/lib/lockref.c
103752@@ -48,13 +48,13 @@
103753 void lockref_get(struct lockref *lockref)
103754 {
103755 CMPXCHG_LOOP(
103756- new.count++;
103757+ __lockref_inc(&new);
103758 ,
103759 return;
103760 );
103761
103762 spin_lock(&lockref->lock);
103763- lockref->count++;
103764+ __lockref_inc(lockref);
103765 spin_unlock(&lockref->lock);
103766 }
103767 EXPORT_SYMBOL(lockref_get);
103768@@ -69,8 +69,8 @@ int lockref_get_not_zero(struct lockref *lockref)
103769 int retval;
103770
103771 CMPXCHG_LOOP(
103772- new.count++;
103773- if (old.count <= 0)
103774+ __lockref_inc(&new);
103775+ if (__lockref_read(&old) <= 0)
103776 return 0;
103777 ,
103778 return 1;
103779@@ -78,8 +78,8 @@ int lockref_get_not_zero(struct lockref *lockref)
103780
103781 spin_lock(&lockref->lock);
103782 retval = 0;
103783- if (lockref->count > 0) {
103784- lockref->count++;
103785+ if (__lockref_read(lockref) > 0) {
103786+ __lockref_inc(lockref);
103787 retval = 1;
103788 }
103789 spin_unlock(&lockref->lock);
103790@@ -96,17 +96,17 @@ EXPORT_SYMBOL(lockref_get_not_zero);
103791 int lockref_get_or_lock(struct lockref *lockref)
103792 {
103793 CMPXCHG_LOOP(
103794- new.count++;
103795- if (old.count <= 0)
103796+ __lockref_inc(&new);
103797+ if (__lockref_read(&old) <= 0)
103798 break;
103799 ,
103800 return 1;
103801 );
103802
103803 spin_lock(&lockref->lock);
103804- if (lockref->count <= 0)
103805+ if (__lockref_read(lockref) <= 0)
103806 return 0;
103807- lockref->count++;
103808+ __lockref_inc(lockref);
103809 spin_unlock(&lockref->lock);
103810 return 1;
103811 }
103812@@ -122,11 +122,11 @@ EXPORT_SYMBOL(lockref_get_or_lock);
103813 int lockref_put_return(struct lockref *lockref)
103814 {
103815 CMPXCHG_LOOP(
103816- new.count--;
103817- if (old.count <= 0)
103818+ __lockref_dec(&new);
103819+ if (__lockref_read(&old) <= 0)
103820 return -1;
103821 ,
103822- return new.count;
103823+ return __lockref_read(&new);
103824 );
103825 return -1;
103826 }
103827@@ -140,17 +140,17 @@ EXPORT_SYMBOL(lockref_put_return);
103828 int lockref_put_or_lock(struct lockref *lockref)
103829 {
103830 CMPXCHG_LOOP(
103831- new.count--;
103832- if (old.count <= 1)
103833+ __lockref_dec(&new);
103834+ if (__lockref_read(&old) <= 1)
103835 break;
103836 ,
103837 return 1;
103838 );
103839
103840 spin_lock(&lockref->lock);
103841- if (lockref->count <= 1)
103842+ if (__lockref_read(lockref) <= 1)
103843 return 0;
103844- lockref->count--;
103845+ __lockref_dec(lockref);
103846 spin_unlock(&lockref->lock);
103847 return 1;
103848 }
103849@@ -163,7 +163,7 @@ EXPORT_SYMBOL(lockref_put_or_lock);
103850 void lockref_mark_dead(struct lockref *lockref)
103851 {
103852 assert_spin_locked(&lockref->lock);
103853- lockref->count = -128;
103854+ __lockref_set(lockref, -128);
103855 }
103856 EXPORT_SYMBOL(lockref_mark_dead);
103857
103858@@ -177,8 +177,8 @@ int lockref_get_not_dead(struct lockref *lockref)
103859 int retval;
103860
103861 CMPXCHG_LOOP(
103862- new.count++;
103863- if (old.count < 0)
103864+ __lockref_inc(&new);
103865+ if (__lockref_read(&old) < 0)
103866 return 0;
103867 ,
103868 return 1;
103869@@ -186,8 +186,8 @@ int lockref_get_not_dead(struct lockref *lockref)
103870
103871 spin_lock(&lockref->lock);
103872 retval = 0;
103873- if (lockref->count >= 0) {
103874- lockref->count++;
103875+ if (__lockref_read(lockref) >= 0) {
103876+ __lockref_inc(lockref);
103877 retval = 1;
103878 }
103879 spin_unlock(&lockref->lock);
103880diff --git a/lib/nlattr.c b/lib/nlattr.c
103881index f5907d2..36072be 100644
103882--- a/lib/nlattr.c
103883+++ b/lib/nlattr.c
103884@@ -278,6 +278,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
103885 {
103886 int minlen = min_t(int, count, nla_len(src));
103887
103888+ BUG_ON(minlen < 0);
103889+
103890 memcpy(dest, nla_data(src), minlen);
103891 if (count > minlen)
103892 memset(dest + minlen, 0, count - minlen);
103893diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
103894index 6111bcb..02e816b 100644
103895--- a/lib/percpu-refcount.c
103896+++ b/lib/percpu-refcount.c
103897@@ -31,7 +31,7 @@
103898 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
103899 */
103900
103901-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
103902+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
103903
103904 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
103905
103906diff --git a/lib/radix-tree.c b/lib/radix-tree.c
103907index 3d2aa27..a472f20 100644
103908--- a/lib/radix-tree.c
103909+++ b/lib/radix-tree.c
103910@@ -67,7 +67,7 @@ struct radix_tree_preload {
103911 int nr;
103912 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
103913 };
103914-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
103915+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
103916
103917 static inline void *ptr_to_indirect(void *ptr)
103918 {
103919diff --git a/lib/random32.c b/lib/random32.c
103920index 0bee183..526f12f 100644
103921--- a/lib/random32.c
103922+++ b/lib/random32.c
103923@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
103924 }
103925 #endif
103926
103927-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
103928+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
103929
103930 /**
103931 * prandom_u32_state - seeded pseudo-random number generator.
103932diff --git a/lib/rbtree.c b/lib/rbtree.c
103933index c16c81a..4dcbda1 100644
103934--- a/lib/rbtree.c
103935+++ b/lib/rbtree.c
103936@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
103937 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
103938
103939 static const struct rb_augment_callbacks dummy_callbacks = {
103940- dummy_propagate, dummy_copy, dummy_rotate
103941+ .propagate = dummy_propagate,
103942+ .copy = dummy_copy,
103943+ .rotate = dummy_rotate
103944 };
103945
103946 void rb_insert_color(struct rb_node *node, struct rb_root *root)
103947diff --git a/lib/show_mem.c b/lib/show_mem.c
103948index adc98e18..0ce83c2 100644
103949--- a/lib/show_mem.c
103950+++ b/lib/show_mem.c
103951@@ -49,6 +49,6 @@ void show_mem(unsigned int filter)
103952 quicklist_total_size());
103953 #endif
103954 #ifdef CONFIG_MEMORY_FAILURE
103955- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
103956+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
103957 #endif
103958 }
103959diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
103960index e0af6ff..fcc9f15 100644
103961--- a/lib/strncpy_from_user.c
103962+++ b/lib/strncpy_from_user.c
103963@@ -22,7 +22,7 @@
103964 */
103965 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
103966 {
103967- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
103968+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
103969 long res = 0;
103970
103971 /*
103972diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
103973index a28df52..02dccaa 100644
103974--- a/lib/strnlen_user.c
103975+++ b/lib/strnlen_user.c
103976@@ -26,7 +26,7 @@
103977 */
103978 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
103979 {
103980- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
103981+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
103982 long align, res = 0;
103983 unsigned long c;
103984
103985@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
103986 return res + find_zero(data) + 1 - align;
103987 }
103988 res += sizeof(unsigned long);
103989- if (unlikely(max < sizeof(unsigned long)))
103990+ /* We already handled 'unsigned long' bytes. Did we do it all ? */
103991+ if (unlikely(max <= sizeof(unsigned long)))
103992 break;
103993 max -= sizeof(unsigned long);
103994 if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
103995diff --git a/lib/swiotlb.c b/lib/swiotlb.c
103996index 4abda07..b9d3765 100644
103997--- a/lib/swiotlb.c
103998+++ b/lib/swiotlb.c
103999@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
104000
104001 void
104002 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
104003- dma_addr_t dev_addr)
104004+ dma_addr_t dev_addr, struct dma_attrs *attrs)
104005 {
104006 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
104007
104008diff --git a/lib/test-hexdump.c b/lib/test-hexdump.c
104009index daf29a39..56f44ac 100644
104010--- a/lib/test-hexdump.c
104011+++ b/lib/test-hexdump.c
104012@@ -18,26 +18,26 @@ static const unsigned char data_b[] = {
104013
104014 static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
104015
104016-static const char *test_data_1_le[] __initconst = {
104017+static const char * const test_data_1_le[] __initconst = {
104018 "be", "32", "db", "7b", "0a", "18", "93", "b2",
104019 "70", "ba", "c4", "24", "7d", "83", "34", "9b",
104020 "a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
104021 "4c", "d1", "19", "99", "43", "b1", "af", "0c",
104022 };
104023
104024-static const char *test_data_2_le[] __initconst = {
104025+static const char * const test_data_2_le[] __initconst = {
104026 "32be", "7bdb", "180a", "b293",
104027 "ba70", "24c4", "837d", "9b34",
104028 "9ca6", "ad31", "0f9c", "e9ac",
104029 "d14c", "9919", "b143", "0caf",
104030 };
104031
104032-static const char *test_data_4_le[] __initconst = {
104033+static const char * const test_data_4_le[] __initconst = {
104034 "7bdb32be", "b293180a", "24c4ba70", "9b34837d",
104035 "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
104036 };
104037
104038-static const char *test_data_8_le[] __initconst = {
104039+static const char * const test_data_8_le[] __initconst = {
104040 "b293180a7bdb32be", "9b34837d24c4ba70",
104041 "e9ac0f9cad319ca6", "0cafb1439919d14c",
104042 };
104043diff --git a/lib/usercopy.c b/lib/usercopy.c
104044index 4f5b1dd..7cab418 100644
104045--- a/lib/usercopy.c
104046+++ b/lib/usercopy.c
104047@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
104048 WARN(1, "Buffer overflow detected!\n");
104049 }
104050 EXPORT_SYMBOL(copy_from_user_overflow);
104051+
104052+void copy_to_user_overflow(void)
104053+{
104054+ WARN(1, "Buffer overflow detected!\n");
104055+}
104056+EXPORT_SYMBOL(copy_to_user_overflow);
104057diff --git a/lib/vsprintf.c b/lib/vsprintf.c
104058index b235c96..343ffc1 100644
104059--- a/lib/vsprintf.c
104060+++ b/lib/vsprintf.c
104061@@ -16,6 +16,9 @@
104062 * - scnprintf and vscnprintf
104063 */
104064
104065+#ifdef CONFIG_GRKERNSEC_HIDESYM
104066+#define __INCLUDED_BY_HIDESYM 1
104067+#endif
104068 #include <stdarg.h>
104069 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
104070 #include <linux/types.h>
104071@@ -626,7 +629,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
104072 #ifdef CONFIG_KALLSYMS
104073 if (*fmt == 'B')
104074 sprint_backtrace(sym, value);
104075- else if (*fmt != 'f' && *fmt != 's')
104076+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
104077 sprint_symbol(sym, value);
104078 else
104079 sprint_symbol_no_offset(sym, value);
104080@@ -1322,7 +1325,11 @@ char *address_val(char *buf, char *end, const void *addr,
104081 return number(buf, end, num, spec);
104082 }
104083
104084+#ifdef CONFIG_GRKERNSEC_HIDESYM
104085+int kptr_restrict __read_mostly = 2;
104086+#else
104087 int kptr_restrict __read_mostly;
104088+#endif
104089
104090 /*
104091 * Show a '%p' thing. A kernel extension is that the '%p' is followed
104092@@ -1333,8 +1340,10 @@ int kptr_restrict __read_mostly;
104093 *
104094 * - 'F' For symbolic function descriptor pointers with offset
104095 * - 'f' For simple symbolic function names without offset
104096+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
104097 * - 'S' For symbolic direct pointers with offset
104098 * - 's' For symbolic direct pointers without offset
104099+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
104100 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
104101 * - 'B' For backtraced symbolic direct pointers with offset
104102 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
104103@@ -1417,12 +1426,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
104104
104105 if (!ptr && *fmt != 'K') {
104106 /*
104107- * Print (null) with the same width as a pointer so it makes
104108+ * Print (nil) with the same width as a pointer so it makes
104109 * tabular output look nice.
104110 */
104111 if (spec.field_width == -1)
104112 spec.field_width = default_width;
104113- return string(buf, end, "(null)", spec);
104114+ return string(buf, end, "(nil)", spec);
104115 }
104116
104117 switch (*fmt) {
104118@@ -1432,6 +1441,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
104119 /* Fallthrough */
104120 case 'S':
104121 case 's':
104122+#ifdef CONFIG_GRKERNSEC_HIDESYM
104123+ break;
104124+#else
104125+ return symbol_string(buf, end, ptr, spec, fmt);
104126+#endif
104127+ case 'X':
104128+ ptr = dereference_function_descriptor(ptr);
104129+ case 'A':
104130 case 'B':
104131 return symbol_string(buf, end, ptr, spec, fmt);
104132 case 'R':
104133@@ -1496,6 +1513,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
104134 va_end(va);
104135 return buf;
104136 }
104137+ case 'P':
104138+ break;
104139 case 'K':
104140 /*
104141 * %pK cannot be used in IRQ context because its test
104142@@ -1553,6 +1572,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
104143 ((const struct file *)ptr)->f_path.dentry,
104144 spec, fmt);
104145 }
104146+
104147+#ifdef CONFIG_GRKERNSEC_HIDESYM
104148+ /* 'P' = approved pointers to copy to userland,
104149+ as in the /proc/kallsyms case, as we make it display nothing
104150+ for non-root users, and the real contents for root users
104151+ 'X' = approved simple symbols
104152+ Also ignore 'K' pointers, since we force their NULLing for non-root users
104153+ above
104154+ */
104155+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
104156+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
104157+ dump_stack();
104158+ ptr = NULL;
104159+ }
104160+#endif
104161+
104162 spec.flags |= SMALL;
104163 if (spec.field_width == -1) {
104164 spec.field_width = default_width;
104165@@ -2254,11 +2289,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
104166 typeof(type) value; \
104167 if (sizeof(type) == 8) { \
104168 args = PTR_ALIGN(args, sizeof(u32)); \
104169- *(u32 *)&value = *(u32 *)args; \
104170- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
104171+ *(u32 *)&value = *(const u32 *)args; \
104172+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
104173 } else { \
104174 args = PTR_ALIGN(args, sizeof(type)); \
104175- value = *(typeof(type) *)args; \
104176+ value = *(const typeof(type) *)args; \
104177 } \
104178 args += sizeof(type); \
104179 value; \
104180@@ -2321,7 +2356,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
104181 case FORMAT_TYPE_STR: {
104182 const char *str_arg = args;
104183 args += strlen(str_arg) + 1;
104184- str = string(str, end, (char *)str_arg, spec);
104185+ str = string(str, end, str_arg, spec);
104186 break;
104187 }
104188
104189diff --git a/localversion-grsec b/localversion-grsec
104190new file mode 100644
104191index 0000000..7cd6065
104192--- /dev/null
104193+++ b/localversion-grsec
104194@@ -0,0 +1 @@
104195+-grsec
104196diff --git a/mm/Kconfig b/mm/Kconfig
104197index a03131b..1b1bafb 100644
104198--- a/mm/Kconfig
104199+++ b/mm/Kconfig
104200@@ -342,10 +342,11 @@ config KSM
104201 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
104202
104203 config DEFAULT_MMAP_MIN_ADDR
104204- int "Low address space to protect from user allocation"
104205+ int "Low address space to protect from user allocation"
104206 depends on MMU
104207- default 4096
104208- help
104209+ default 32768 if ALPHA || ARM || PARISC || SPARC32
104210+ default 65536
104211+ help
104212 This is the portion of low virtual memory which should be protected
104213 from userspace allocation. Keeping a user from writing to low pages
104214 can help reduce the impact of kernel NULL pointer bugs.
104215@@ -376,7 +377,7 @@ config MEMORY_FAILURE
104216
104217 config HWPOISON_INJECT
104218 tristate "HWPoison pages injector"
104219- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
104220+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
104221 select PROC_PAGE_MONITOR
104222
104223 config NOMMU_INITIAL_TRIM_EXCESS
104224diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
104225index 957d3da..1d34e20 100644
104226--- a/mm/Kconfig.debug
104227+++ b/mm/Kconfig.debug
104228@@ -10,6 +10,7 @@ config PAGE_EXTENSION
104229 config DEBUG_PAGEALLOC
104230 bool "Debug page memory allocations"
104231 depends on DEBUG_KERNEL
104232+ depends on !PAX_MEMORY_SANITIZE
104233 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
104234 depends on !KMEMCHECK
104235 select PAGE_EXTENSION
104236diff --git a/mm/backing-dev.c b/mm/backing-dev.c
104237index 6dc4580..e031ec1 100644
104238--- a/mm/backing-dev.c
104239+++ b/mm/backing-dev.c
104240@@ -12,7 +12,7 @@
104241 #include <linux/device.h>
104242 #include <trace/events/writeback.h>
104243
104244-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
104245+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
104246
104247 struct backing_dev_info noop_backing_dev_info = {
104248 .name = "noop",
104249@@ -474,7 +474,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
104250 return err;
104251
104252 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
104253- atomic_long_inc_return(&bdi_seq));
104254+ atomic_long_inc_return_unchecked(&bdi_seq));
104255 if (err) {
104256 bdi_destroy(bdi);
104257 return err;
104258diff --git a/mm/filemap.c b/mm/filemap.c
104259index ad72420..0a20ef2 100644
104260--- a/mm/filemap.c
104261+++ b/mm/filemap.c
104262@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
104263 struct address_space *mapping = file->f_mapping;
104264
104265 if (!mapping->a_ops->readpage)
104266- return -ENOEXEC;
104267+ return -ENODEV;
104268 file_accessed(file);
104269 vma->vm_ops = &generic_file_vm_ops;
104270 return 0;
104271@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
104272 *pos = i_size_read(inode);
104273
104274 if (limit != RLIM_INFINITY) {
104275+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
104276 if (*pos >= limit) {
104277 send_sig(SIGXFSZ, current, 0);
104278 return -EFBIG;
104279diff --git a/mm/gup.c b/mm/gup.c
104280index a6e24e2..72dd2cf 100644
104281--- a/mm/gup.c
104282+++ b/mm/gup.c
104283@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
104284 unsigned int fault_flags = 0;
104285 int ret;
104286
104287- /* For mlock, just skip the stack guard page. */
104288- if ((*flags & FOLL_MLOCK) &&
104289- (stack_guard_page_start(vma, address) ||
104290- stack_guard_page_end(vma, address + PAGE_SIZE)))
104291- return -ENOENT;
104292 if (*flags & FOLL_WRITE)
104293 fault_flags |= FAULT_FLAG_WRITE;
104294 if (nonblocking)
104295@@ -435,14 +430,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
104296 if (!(gup_flags & FOLL_FORCE))
104297 gup_flags |= FOLL_NUMA;
104298
104299- do {
104300+ while (nr_pages) {
104301 struct page *page;
104302 unsigned int foll_flags = gup_flags;
104303 unsigned int page_increm;
104304
104305 /* first iteration or cross vma bound */
104306 if (!vma || start >= vma->vm_end) {
104307- vma = find_extend_vma(mm, start);
104308+ vma = find_vma(mm, start);
104309 if (!vma && in_gate_area(mm, start)) {
104310 int ret;
104311 ret = get_gate_page(mm, start & PAGE_MASK,
104312@@ -454,7 +449,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
104313 goto next_page;
104314 }
104315
104316- if (!vma || check_vma_flags(vma, gup_flags))
104317+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
104318 return i ? : -EFAULT;
104319 if (is_vm_hugetlb_page(vma)) {
104320 i = follow_hugetlb_page(mm, vma, pages, vmas,
104321@@ -509,7 +504,7 @@ next_page:
104322 i += page_increm;
104323 start += page_increm * PAGE_SIZE;
104324 nr_pages -= page_increm;
104325- } while (nr_pages);
104326+ }
104327 return i;
104328 }
104329 EXPORT_SYMBOL(__get_user_pages);
104330diff --git a/mm/highmem.c b/mm/highmem.c
104331index 123bcd3..0de52ba 100644
104332--- a/mm/highmem.c
104333+++ b/mm/highmem.c
104334@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
104335 * So no dangers, even with speculative execution.
104336 */
104337 page = pte_page(pkmap_page_table[i]);
104338+ pax_open_kernel();
104339 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
104340-
104341+ pax_close_kernel();
104342 set_page_address(page, NULL);
104343 need_flush = 1;
104344 }
104345@@ -259,9 +260,11 @@ start:
104346 }
104347 }
104348 vaddr = PKMAP_ADDR(last_pkmap_nr);
104349+
104350+ pax_open_kernel();
104351 set_pte_at(&init_mm, vaddr,
104352 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
104353-
104354+ pax_close_kernel();
104355 pkmap_count[last_pkmap_nr] = 1;
104356 set_page_address(page, (void *)vaddr);
104357
104358diff --git a/mm/hugetlb.c b/mm/hugetlb.c
104359index caad3c5..4f68807 100644
104360--- a/mm/hugetlb.c
104361+++ b/mm/hugetlb.c
104362@@ -2260,6 +2260,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
104363 struct ctl_table *table, int write,
104364 void __user *buffer, size_t *length, loff_t *ppos)
104365 {
104366+ ctl_table_no_const t;
104367 struct hstate *h = &default_hstate;
104368 unsigned long tmp = h->max_huge_pages;
104369 int ret;
104370@@ -2267,9 +2268,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
104371 if (!hugepages_supported())
104372 return -ENOTSUPP;
104373
104374- table->data = &tmp;
104375- table->maxlen = sizeof(unsigned long);
104376- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
104377+ t = *table;
104378+ t.data = &tmp;
104379+ t.maxlen = sizeof(unsigned long);
104380+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
104381 if (ret)
104382 goto out;
104383
104384@@ -2304,6 +2306,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
104385 struct hstate *h = &default_hstate;
104386 unsigned long tmp;
104387 int ret;
104388+ ctl_table_no_const hugetlb_table;
104389
104390 if (!hugepages_supported())
104391 return -ENOTSUPP;
104392@@ -2313,9 +2316,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
104393 if (write && hstate_is_gigantic(h))
104394 return -EINVAL;
104395
104396- table->data = &tmp;
104397- table->maxlen = sizeof(unsigned long);
104398- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
104399+ hugetlb_table = *table;
104400+ hugetlb_table.data = &tmp;
104401+ hugetlb_table.maxlen = sizeof(unsigned long);
104402+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
104403 if (ret)
104404 goto out;
104405
104406@@ -2800,6 +2804,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
104407 i_mmap_unlock_write(mapping);
104408 }
104409
104410+#ifdef CONFIG_PAX_SEGMEXEC
104411+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
104412+{
104413+ struct mm_struct *mm = vma->vm_mm;
104414+ struct vm_area_struct *vma_m;
104415+ unsigned long address_m;
104416+ pte_t *ptep_m;
104417+
104418+ vma_m = pax_find_mirror_vma(vma);
104419+ if (!vma_m)
104420+ return;
104421+
104422+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
104423+ address_m = address + SEGMEXEC_TASK_SIZE;
104424+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
104425+ get_page(page_m);
104426+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
104427+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
104428+}
104429+#endif
104430+
104431 /*
104432 * Hugetlb_cow() should be called with page lock of the original hugepage held.
104433 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
104434@@ -2912,6 +2937,11 @@ retry_avoidcopy:
104435 make_huge_pte(vma, new_page, 1));
104436 page_remove_rmap(old_page);
104437 hugepage_add_new_anon_rmap(new_page, vma, address);
104438+
104439+#ifdef CONFIG_PAX_SEGMEXEC
104440+ pax_mirror_huge_pte(vma, address, new_page);
104441+#endif
104442+
104443 /* Make the old page be freed below */
104444 new_page = old_page;
104445 }
104446@@ -3072,6 +3102,10 @@ retry:
104447 && (vma->vm_flags & VM_SHARED)));
104448 set_huge_pte_at(mm, address, ptep, new_pte);
104449
104450+#ifdef CONFIG_PAX_SEGMEXEC
104451+ pax_mirror_huge_pte(vma, address, page);
104452+#endif
104453+
104454 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
104455 /* Optimization, do the COW without a second fault */
104456 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
104457@@ -3139,6 +3173,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104458 struct address_space *mapping;
104459 int need_wait_lock = 0;
104460
104461+#ifdef CONFIG_PAX_SEGMEXEC
104462+ struct vm_area_struct *vma_m;
104463+#endif
104464+
104465 address &= huge_page_mask(h);
104466
104467 ptep = huge_pte_offset(mm, address);
104468@@ -3152,6 +3190,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104469 VM_FAULT_SET_HINDEX(hstate_index(h));
104470 }
104471
104472+#ifdef CONFIG_PAX_SEGMEXEC
104473+ vma_m = pax_find_mirror_vma(vma);
104474+ if (vma_m) {
104475+ unsigned long address_m;
104476+
104477+ if (vma->vm_start > vma_m->vm_start) {
104478+ address_m = address;
104479+ address -= SEGMEXEC_TASK_SIZE;
104480+ vma = vma_m;
104481+ h = hstate_vma(vma);
104482+ } else
104483+ address_m = address + SEGMEXEC_TASK_SIZE;
104484+
104485+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
104486+ return VM_FAULT_OOM;
104487+ address_m &= HPAGE_MASK;
104488+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
104489+ }
104490+#endif
104491+
104492 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
104493 if (!ptep)
104494 return VM_FAULT_OOM;
104495diff --git a/mm/internal.h b/mm/internal.h
104496index a96da5b..42ebd54 100644
104497--- a/mm/internal.h
104498+++ b/mm/internal.h
104499@@ -156,6 +156,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
104500
104501 extern int __isolate_free_page(struct page *page, unsigned int order);
104502 extern void __free_pages_bootmem(struct page *page, unsigned int order);
104503+extern void free_compound_page(struct page *page);
104504 extern void prep_compound_page(struct page *page, unsigned long order);
104505 #ifdef CONFIG_MEMORY_FAILURE
104506 extern bool is_free_buddy_page(struct page *page);
104507@@ -411,7 +412,7 @@ extern u32 hwpoison_filter_enable;
104508
104509 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
104510 unsigned long, unsigned long,
104511- unsigned long, unsigned long);
104512+ unsigned long, unsigned long) __intentional_overflow(-1);
104513
104514 extern void set_pageblock_order(void);
104515 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
104516diff --git a/mm/kmemleak.c b/mm/kmemleak.c
104517index 5405aff..483406d 100644
104518--- a/mm/kmemleak.c
104519+++ b/mm/kmemleak.c
104520@@ -365,7 +365,7 @@ static void print_unreferenced(struct seq_file *seq,
104521
104522 for (i = 0; i < object->trace_len; i++) {
104523 void *ptr = (void *)object->trace[i];
104524- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
104525+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
104526 }
104527 }
104528
104529@@ -1911,7 +1911,7 @@ static int __init kmemleak_late_init(void)
104530 return -ENOMEM;
104531 }
104532
104533- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
104534+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
104535 &kmemleak_fops);
104536 if (!dentry)
104537 pr_warning("Failed to create the debugfs kmemleak file\n");
104538diff --git a/mm/maccess.c b/mm/maccess.c
104539index d53adf9..03a24bf 100644
104540--- a/mm/maccess.c
104541+++ b/mm/maccess.c
104542@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
104543 set_fs(KERNEL_DS);
104544 pagefault_disable();
104545 ret = __copy_from_user_inatomic(dst,
104546- (__force const void __user *)src, size);
104547+ (const void __force_user *)src, size);
104548 pagefault_enable();
104549 set_fs(old_fs);
104550
104551@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
104552
104553 set_fs(KERNEL_DS);
104554 pagefault_disable();
104555- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
104556+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
104557 pagefault_enable();
104558 set_fs(old_fs);
104559
104560diff --git a/mm/madvise.c b/mm/madvise.c
104561index d551475..8fdd7f3 100644
104562--- a/mm/madvise.c
104563+++ b/mm/madvise.c
104564@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
104565 pgoff_t pgoff;
104566 unsigned long new_flags = vma->vm_flags;
104567
104568+#ifdef CONFIG_PAX_SEGMEXEC
104569+ struct vm_area_struct *vma_m;
104570+#endif
104571+
104572 switch (behavior) {
104573 case MADV_NORMAL:
104574 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
104575@@ -126,6 +130,13 @@ success:
104576 /*
104577 * vm_flags is protected by the mmap_sem held in write mode.
104578 */
104579+
104580+#ifdef CONFIG_PAX_SEGMEXEC
104581+ vma_m = pax_find_mirror_vma(vma);
104582+ if (vma_m)
104583+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
104584+#endif
104585+
104586 vma->vm_flags = new_flags;
104587
104588 out:
104589@@ -277,11 +288,27 @@ static long madvise_dontneed(struct vm_area_struct *vma,
104590 struct vm_area_struct **prev,
104591 unsigned long start, unsigned long end)
104592 {
104593+
104594+#ifdef CONFIG_PAX_SEGMEXEC
104595+ struct vm_area_struct *vma_m;
104596+#endif
104597+
104598 *prev = vma;
104599 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
104600 return -EINVAL;
104601
104602 zap_page_range(vma, start, end - start, NULL);
104603+
104604+#ifdef CONFIG_PAX_SEGMEXEC
104605+ vma_m = pax_find_mirror_vma(vma);
104606+ if (vma_m) {
104607+ if (vma_m->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
104608+ return -EINVAL;
104609+
104610+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
104611+ }
104612+#endif
104613+
104614 return 0;
104615 }
104616
104617@@ -484,6 +511,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
104618 if (end < start)
104619 return error;
104620
104621+#ifdef CONFIG_PAX_SEGMEXEC
104622+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
104623+ if (end > SEGMEXEC_TASK_SIZE)
104624+ return error;
104625+ } else
104626+#endif
104627+
104628+ if (end > TASK_SIZE)
104629+ return error;
104630+
104631 error = 0;
104632 if (end == start)
104633 return error;
104634diff --git a/mm/memory-failure.c b/mm/memory-failure.c
104635index 72a5224..51ba846 100644
104636--- a/mm/memory-failure.c
104637+++ b/mm/memory-failure.c
104638@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
104639
104640 int sysctl_memory_failure_recovery __read_mostly = 1;
104641
104642-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
104643+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
104644
104645 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
104646
104647@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
104648 pfn, t->comm, t->pid);
104649 si.si_signo = SIGBUS;
104650 si.si_errno = 0;
104651- si.si_addr = (void *)addr;
104652+ si.si_addr = (void __user *)addr;
104653 #ifdef __ARCH_SI_TRAPNO
104654 si.si_trapno = trapno;
104655 #endif
104656@@ -779,7 +779,7 @@ static struct page_state {
104657 unsigned long res;
104658 char *msg;
104659 int (*action)(struct page *p, unsigned long pfn);
104660-} error_states[] = {
104661+} __do_const error_states[] = {
104662 { reserved, reserved, "reserved kernel", me_kernel },
104663 /*
104664 * free pages are specially detected outside this table:
104665@@ -1087,7 +1087,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
104666 nr_pages = 1 << compound_order(hpage);
104667 else /* normal page or thp */
104668 nr_pages = 1;
104669- atomic_long_add(nr_pages, &num_poisoned_pages);
104670+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
104671
104672 /*
104673 * We need/can do nothing about count=0 pages.
104674@@ -1116,7 +1116,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
104675 if (PageHWPoison(hpage)) {
104676 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
104677 || (p != hpage && TestSetPageHWPoison(hpage))) {
104678- atomic_long_sub(nr_pages, &num_poisoned_pages);
104679+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104680 unlock_page(hpage);
104681 return 0;
104682 }
104683@@ -1184,14 +1184,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
104684 */
104685 if (!PageHWPoison(p)) {
104686 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
104687- atomic_long_sub(nr_pages, &num_poisoned_pages);
104688+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104689 put_page(hpage);
104690 res = 0;
104691 goto out;
104692 }
104693 if (hwpoison_filter(p)) {
104694 if (TestClearPageHWPoison(p))
104695- atomic_long_sub(nr_pages, &num_poisoned_pages);
104696+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104697 unlock_page(hpage);
104698 put_page(hpage);
104699 return 0;
104700@@ -1421,7 +1421,7 @@ int unpoison_memory(unsigned long pfn)
104701 return 0;
104702 }
104703 if (TestClearPageHWPoison(p))
104704- atomic_long_dec(&num_poisoned_pages);
104705+ atomic_long_dec_unchecked(&num_poisoned_pages);
104706 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
104707 return 0;
104708 }
104709@@ -1435,7 +1435,7 @@ int unpoison_memory(unsigned long pfn)
104710 */
104711 if (TestClearPageHWPoison(page)) {
104712 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
104713- atomic_long_sub(nr_pages, &num_poisoned_pages);
104714+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104715 freeit = 1;
104716 if (PageHuge(page))
104717 clear_page_hwpoison_huge_page(page);
104718@@ -1560,11 +1560,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
104719 if (PageHuge(page)) {
104720 set_page_hwpoison_huge_page(hpage);
104721 dequeue_hwpoisoned_huge_page(hpage);
104722- atomic_long_add(1 << compound_order(hpage),
104723+ atomic_long_add_unchecked(1 << compound_order(hpage),
104724 &num_poisoned_pages);
104725 } else {
104726 SetPageHWPoison(page);
104727- atomic_long_inc(&num_poisoned_pages);
104728+ atomic_long_inc_unchecked(&num_poisoned_pages);
104729 }
104730 }
104731 return ret;
104732@@ -1603,7 +1603,7 @@ static int __soft_offline_page(struct page *page, int flags)
104733 put_page(page);
104734 pr_info("soft_offline: %#lx: invalidated\n", pfn);
104735 SetPageHWPoison(page);
104736- atomic_long_inc(&num_poisoned_pages);
104737+ atomic_long_inc_unchecked(&num_poisoned_pages);
104738 return 0;
104739 }
104740
104741@@ -1652,7 +1652,7 @@ static int __soft_offline_page(struct page *page, int flags)
104742 if (!is_free_buddy_page(page))
104743 pr_info("soft offline: %#lx: page leaked\n",
104744 pfn);
104745- atomic_long_inc(&num_poisoned_pages);
104746+ atomic_long_inc_unchecked(&num_poisoned_pages);
104747 }
104748 } else {
104749 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
104750@@ -1722,11 +1722,11 @@ int soft_offline_page(struct page *page, int flags)
104751 if (PageHuge(page)) {
104752 set_page_hwpoison_huge_page(hpage);
104753 if (!dequeue_hwpoisoned_huge_page(hpage))
104754- atomic_long_add(1 << compound_order(hpage),
104755+ atomic_long_add_unchecked(1 << compound_order(hpage),
104756 &num_poisoned_pages);
104757 } else {
104758 if (!TestSetPageHWPoison(page))
104759- atomic_long_inc(&num_poisoned_pages);
104760+ atomic_long_inc_unchecked(&num_poisoned_pages);
104761 }
104762 }
104763 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
104764diff --git a/mm/memory.c b/mm/memory.c
104765index 97839f5..4bc5530 100644
104766--- a/mm/memory.c
104767+++ b/mm/memory.c
104768@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
104769 free_pte_range(tlb, pmd, addr);
104770 } while (pmd++, addr = next, addr != end);
104771
104772+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
104773 start &= PUD_MASK;
104774 if (start < floor)
104775 return;
104776@@ -429,6 +430,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
104777 pud_clear(pud);
104778 pmd_free_tlb(tlb, pmd, start);
104779 mm_dec_nr_pmds(tlb->mm);
104780+#endif
104781 }
104782
104783 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
104784@@ -448,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
104785 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
104786 } while (pud++, addr = next, addr != end);
104787
104788+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
104789 start &= PGDIR_MASK;
104790 if (start < floor)
104791 return;
104792@@ -462,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
104793 pud = pud_offset(pgd, start);
104794 pgd_clear(pgd);
104795 pud_free_tlb(tlb, pud, start);
104796+#endif
104797+
104798 }
104799
104800 /*
104801@@ -691,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
104802 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
104803 */
104804 if (vma->vm_ops)
104805- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
104806+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
104807 vma->vm_ops->fault);
104808 if (vma->vm_file)
104809- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
104810+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
104811 vma->vm_file->f_op->mmap);
104812 dump_stack();
104813 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
104814@@ -1464,6 +1469,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
104815 page_add_file_rmap(page);
104816 set_pte_at(mm, addr, pte, mk_pte(page, prot));
104817
104818+#ifdef CONFIG_PAX_SEGMEXEC
104819+ pax_mirror_file_pte(vma, addr, page, ptl);
104820+#endif
104821+
104822 retval = 0;
104823 pte_unmap_unlock(pte, ptl);
104824 return retval;
104825@@ -1508,9 +1517,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
104826 if (!page_count(page))
104827 return -EINVAL;
104828 if (!(vma->vm_flags & VM_MIXEDMAP)) {
104829+
104830+#ifdef CONFIG_PAX_SEGMEXEC
104831+ struct vm_area_struct *vma_m;
104832+#endif
104833+
104834 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
104835 BUG_ON(vma->vm_flags & VM_PFNMAP);
104836 vma->vm_flags |= VM_MIXEDMAP;
104837+
104838+#ifdef CONFIG_PAX_SEGMEXEC
104839+ vma_m = pax_find_mirror_vma(vma);
104840+ if (vma_m)
104841+ vma_m->vm_flags |= VM_MIXEDMAP;
104842+#endif
104843+
104844 }
104845 return insert_page(vma, addr, page, vma->vm_page_prot);
104846 }
104847@@ -1593,6 +1614,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
104848 unsigned long pfn)
104849 {
104850 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
104851+ BUG_ON(vma->vm_mirror);
104852
104853 if (addr < vma->vm_start || addr >= vma->vm_end)
104854 return -EFAULT;
104855@@ -1840,7 +1862,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
104856
104857 BUG_ON(pud_huge(*pud));
104858
104859- pmd = pmd_alloc(mm, pud, addr);
104860+ pmd = (mm == &init_mm) ?
104861+ pmd_alloc_kernel(mm, pud, addr) :
104862+ pmd_alloc(mm, pud, addr);
104863 if (!pmd)
104864 return -ENOMEM;
104865 do {
104866@@ -1860,7 +1884,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
104867 unsigned long next;
104868 int err;
104869
104870- pud = pud_alloc(mm, pgd, addr);
104871+ pud = (mm == &init_mm) ?
104872+ pud_alloc_kernel(mm, pgd, addr) :
104873+ pud_alloc(mm, pgd, addr);
104874 if (!pud)
104875 return -ENOMEM;
104876 do {
104877@@ -1982,6 +2008,185 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
104878 return ret;
104879 }
104880
104881+#ifdef CONFIG_PAX_SEGMEXEC
104882+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
104883+{
104884+ struct mm_struct *mm = vma->vm_mm;
104885+ spinlock_t *ptl;
104886+ pte_t *pte, entry;
104887+
104888+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
104889+ entry = *pte;
104890+ if (!pte_present(entry)) {
104891+ if (!pte_none(entry)) {
104892+ free_swap_and_cache(pte_to_swp_entry(entry));
104893+ pte_clear_not_present_full(mm, address, pte, 0);
104894+ }
104895+ } else {
104896+ struct page *page;
104897+
104898+ flush_cache_page(vma, address, pte_pfn(entry));
104899+ entry = ptep_clear_flush(vma, address, pte);
104900+ BUG_ON(pte_dirty(entry));
104901+ page = vm_normal_page(vma, address, entry);
104902+ if (page) {
104903+ update_hiwater_rss(mm);
104904+ if (PageAnon(page))
104905+ dec_mm_counter_fast(mm, MM_ANONPAGES);
104906+ else
104907+ dec_mm_counter_fast(mm, MM_FILEPAGES);
104908+ page_remove_rmap(page);
104909+ page_cache_release(page);
104910+ }
104911+ }
104912+ pte_unmap_unlock(pte, ptl);
104913+}
104914+
104915+/* PaX: if vma is mirrored, synchronize the mirror's PTE
104916+ *
104917+ * the ptl of the lower mapped page is held on entry and is not released on exit
104918+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
104919+ */
104920+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
104921+{
104922+ struct mm_struct *mm = vma->vm_mm;
104923+ unsigned long address_m;
104924+ spinlock_t *ptl_m;
104925+ struct vm_area_struct *vma_m;
104926+ pmd_t *pmd_m;
104927+ pte_t *pte_m, entry_m;
104928+
104929+ BUG_ON(!page_m || !PageAnon(page_m));
104930+
104931+ vma_m = pax_find_mirror_vma(vma);
104932+ if (!vma_m)
104933+ return;
104934+
104935+ BUG_ON(!PageLocked(page_m));
104936+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
104937+ address_m = address + SEGMEXEC_TASK_SIZE;
104938+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
104939+ pte_m = pte_offset_map(pmd_m, address_m);
104940+ ptl_m = pte_lockptr(mm, pmd_m);
104941+ if (ptl != ptl_m) {
104942+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
104943+ if (!pte_none(*pte_m))
104944+ goto out;
104945+ }
104946+
104947+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
104948+ page_cache_get(page_m);
104949+ page_add_anon_rmap(page_m, vma_m, address_m);
104950+ inc_mm_counter_fast(mm, MM_ANONPAGES);
104951+ set_pte_at(mm, address_m, pte_m, entry_m);
104952+ update_mmu_cache(vma_m, address_m, pte_m);
104953+out:
104954+ if (ptl != ptl_m)
104955+ spin_unlock(ptl_m);
104956+ pte_unmap(pte_m);
104957+ unlock_page(page_m);
104958+}
104959+
104960+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
104961+{
104962+ struct mm_struct *mm = vma->vm_mm;
104963+ unsigned long address_m;
104964+ spinlock_t *ptl_m;
104965+ struct vm_area_struct *vma_m;
104966+ pmd_t *pmd_m;
104967+ pte_t *pte_m, entry_m;
104968+
104969+ BUG_ON(!page_m || PageAnon(page_m));
104970+
104971+ vma_m = pax_find_mirror_vma(vma);
104972+ if (!vma_m)
104973+ return;
104974+
104975+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
104976+ address_m = address + SEGMEXEC_TASK_SIZE;
104977+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
104978+ pte_m = pte_offset_map(pmd_m, address_m);
104979+ ptl_m = pte_lockptr(mm, pmd_m);
104980+ if (ptl != ptl_m) {
104981+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
104982+ if (!pte_none(*pte_m))
104983+ goto out;
104984+ }
104985+
104986+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
104987+ page_cache_get(page_m);
104988+ page_add_file_rmap(page_m);
104989+ inc_mm_counter_fast(mm, MM_FILEPAGES);
104990+ set_pte_at(mm, address_m, pte_m, entry_m);
104991+ update_mmu_cache(vma_m, address_m, pte_m);
104992+out:
104993+ if (ptl != ptl_m)
104994+ spin_unlock(ptl_m);
104995+ pte_unmap(pte_m);
104996+}
104997+
104998+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
104999+{
105000+ struct mm_struct *mm = vma->vm_mm;
105001+ unsigned long address_m;
105002+ spinlock_t *ptl_m;
105003+ struct vm_area_struct *vma_m;
105004+ pmd_t *pmd_m;
105005+ pte_t *pte_m, entry_m;
105006+
105007+ vma_m = pax_find_mirror_vma(vma);
105008+ if (!vma_m)
105009+ return;
105010+
105011+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
105012+ address_m = address + SEGMEXEC_TASK_SIZE;
105013+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
105014+ pte_m = pte_offset_map(pmd_m, address_m);
105015+ ptl_m = pte_lockptr(mm, pmd_m);
105016+ if (ptl != ptl_m) {
105017+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
105018+ if (!pte_none(*pte_m))
105019+ goto out;
105020+ }
105021+
105022+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
105023+ set_pte_at(mm, address_m, pte_m, entry_m);
105024+out:
105025+ if (ptl != ptl_m)
105026+ spin_unlock(ptl_m);
105027+ pte_unmap(pte_m);
105028+}
105029+
105030+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
105031+{
105032+ struct page *page_m;
105033+ pte_t entry;
105034+
105035+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
105036+ goto out;
105037+
105038+ entry = *pte;
105039+ page_m = vm_normal_page(vma, address, entry);
105040+ if (!page_m)
105041+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
105042+ else if (PageAnon(page_m)) {
105043+ if (pax_find_mirror_vma(vma)) {
105044+ pte_unmap_unlock(pte, ptl);
105045+ lock_page(page_m);
105046+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
105047+ if (pte_same(entry, *pte))
105048+ pax_mirror_anon_pte(vma, address, page_m, ptl);
105049+ else
105050+ unlock_page(page_m);
105051+ }
105052+ } else
105053+ pax_mirror_file_pte(vma, address, page_m, ptl);
105054+
105055+out:
105056+ pte_unmap_unlock(pte, ptl);
105057+}
105058+#endif
105059+
105060 /*
105061 * This routine handles present pages, when users try to write
105062 * to a shared page. It is done by copying the page to a new address
105063@@ -2172,6 +2377,12 @@ gotten:
105064 */
105065 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
105066 if (likely(pte_same(*page_table, orig_pte))) {
105067+
105068+#ifdef CONFIG_PAX_SEGMEXEC
105069+ if (pax_find_mirror_vma(vma))
105070+ BUG_ON(!trylock_page(new_page));
105071+#endif
105072+
105073 if (old_page) {
105074 if (!PageAnon(old_page)) {
105075 dec_mm_counter_fast(mm, MM_FILEPAGES);
105076@@ -2225,6 +2436,10 @@ gotten:
105077 page_remove_rmap(old_page);
105078 }
105079
105080+#ifdef CONFIG_PAX_SEGMEXEC
105081+ pax_mirror_anon_pte(vma, address, new_page, ptl);
105082+#endif
105083+
105084 /* Free the old page.. */
105085 new_page = old_page;
105086 ret |= VM_FAULT_WRITE;
105087@@ -2483,6 +2698,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
105088 swap_free(entry);
105089 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
105090 try_to_free_swap(page);
105091+
105092+#ifdef CONFIG_PAX_SEGMEXEC
105093+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
105094+#endif
105095+
105096 unlock_page(page);
105097 if (page != swapcache) {
105098 /*
105099@@ -2506,6 +2726,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
105100
105101 /* No need to invalidate - it was non-present before */
105102 update_mmu_cache(vma, address, page_table);
105103+
105104+#ifdef CONFIG_PAX_SEGMEXEC
105105+ pax_mirror_anon_pte(vma, address, page, ptl);
105106+#endif
105107+
105108 unlock:
105109 pte_unmap_unlock(page_table, ptl);
105110 out:
105111@@ -2525,40 +2750,6 @@ out_release:
105112 }
105113
105114 /*
105115- * This is like a special single-page "expand_{down|up}wards()",
105116- * except we must first make sure that 'address{-|+}PAGE_SIZE'
105117- * doesn't hit another vma.
105118- */
105119-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
105120-{
105121- address &= PAGE_MASK;
105122- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
105123- struct vm_area_struct *prev = vma->vm_prev;
105124-
105125- /*
105126- * Is there a mapping abutting this one below?
105127- *
105128- * That's only ok if it's the same stack mapping
105129- * that has gotten split..
105130- */
105131- if (prev && prev->vm_end == address)
105132- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
105133-
105134- return expand_downwards(vma, address - PAGE_SIZE);
105135- }
105136- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
105137- struct vm_area_struct *next = vma->vm_next;
105138-
105139- /* As VM_GROWSDOWN but s/below/above/ */
105140- if (next && next->vm_start == address + PAGE_SIZE)
105141- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
105142-
105143- return expand_upwards(vma, address + PAGE_SIZE);
105144- }
105145- return 0;
105146-}
105147-
105148-/*
105149 * We enter with non-exclusive mmap_sem (to exclude vma changes,
105150 * but allow concurrent faults), and pte mapped but not yet locked.
105151 * We return with mmap_sem still held, but pte unmapped and unlocked.
105152@@ -2568,27 +2759,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
105153 unsigned int flags)
105154 {
105155 struct mem_cgroup *memcg;
105156- struct page *page;
105157+ struct page *page = NULL;
105158 spinlock_t *ptl;
105159 pte_t entry;
105160
105161- pte_unmap(page_table);
105162-
105163- /* Check if we need to add a guard page to the stack */
105164- if (check_stack_guard_page(vma, address) < 0)
105165- return VM_FAULT_SIGSEGV;
105166-
105167- /* Use the zero-page for reads */
105168 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
105169 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
105170 vma->vm_page_prot));
105171- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
105172+ ptl = pte_lockptr(mm, pmd);
105173+ spin_lock(ptl);
105174 if (!pte_none(*page_table))
105175 goto unlock;
105176 goto setpte;
105177 }
105178
105179 /* Allocate our own private page. */
105180+ pte_unmap(page_table);
105181+
105182 if (unlikely(anon_vma_prepare(vma)))
105183 goto oom;
105184 page = alloc_zeroed_user_highpage_movable(vma, address);
105185@@ -2612,6 +2799,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
105186 if (!pte_none(*page_table))
105187 goto release;
105188
105189+#ifdef CONFIG_PAX_SEGMEXEC
105190+ if (pax_find_mirror_vma(vma))
105191+ BUG_ON(!trylock_page(page));
105192+#endif
105193+
105194 inc_mm_counter_fast(mm, MM_ANONPAGES);
105195 page_add_new_anon_rmap(page, vma, address);
105196 mem_cgroup_commit_charge(page, memcg, false);
105197@@ -2621,6 +2813,12 @@ setpte:
105198
105199 /* No need to invalidate - it was non-present before */
105200 update_mmu_cache(vma, address, page_table);
105201+
105202+#ifdef CONFIG_PAX_SEGMEXEC
105203+ if (page)
105204+ pax_mirror_anon_pte(vma, address, page, ptl);
105205+#endif
105206+
105207 unlock:
105208 pte_unmap_unlock(page_table, ptl);
105209 return 0;
105210@@ -2853,6 +3051,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
105211 return ret;
105212 }
105213 do_set_pte(vma, address, fault_page, pte, false, false);
105214+
105215+#ifdef CONFIG_PAX_SEGMEXEC
105216+ pax_mirror_file_pte(vma, address, fault_page, ptl);
105217+#endif
105218+
105219 unlock_page(fault_page);
105220 unlock_out:
105221 pte_unmap_unlock(pte, ptl);
105222@@ -2904,7 +3107,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
105223 }
105224 goto uncharge_out;
105225 }
105226+
105227+#ifdef CONFIG_PAX_SEGMEXEC
105228+ if (pax_find_mirror_vma(vma))
105229+ BUG_ON(!trylock_page(new_page));
105230+#endif
105231+
105232 do_set_pte(vma, address, new_page, pte, true, true);
105233+
105234+#ifdef CONFIG_PAX_SEGMEXEC
105235+ pax_mirror_anon_pte(vma, address, new_page, ptl);
105236+#endif
105237+
105238 mem_cgroup_commit_charge(new_page, memcg, false);
105239 lru_cache_add_active_or_unevictable(new_page, vma);
105240 pte_unmap_unlock(pte, ptl);
105241@@ -2962,6 +3176,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
105242 return ret;
105243 }
105244 do_set_pte(vma, address, fault_page, pte, true, false);
105245+
105246+#ifdef CONFIG_PAX_SEGMEXEC
105247+ pax_mirror_file_pte(vma, address, fault_page, ptl);
105248+#endif
105249+
105250 pte_unmap_unlock(pte, ptl);
105251
105252 if (set_page_dirty(fault_page))
105253@@ -3185,6 +3404,12 @@ static int handle_pte_fault(struct mm_struct *mm,
105254 if (flags & FAULT_FLAG_WRITE)
105255 flush_tlb_fix_spurious_fault(vma, address);
105256 }
105257+
105258+#ifdef CONFIG_PAX_SEGMEXEC
105259+ pax_mirror_pte(vma, address, pte, pmd, ptl);
105260+ return 0;
105261+#endif
105262+
105263 unlock:
105264 pte_unmap_unlock(pte, ptl);
105265 return 0;
105266@@ -3204,9 +3429,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
105267 pmd_t *pmd;
105268 pte_t *pte;
105269
105270+#ifdef CONFIG_PAX_SEGMEXEC
105271+ struct vm_area_struct *vma_m;
105272+#endif
105273+
105274 if (unlikely(is_vm_hugetlb_page(vma)))
105275 return hugetlb_fault(mm, vma, address, flags);
105276
105277+#ifdef CONFIG_PAX_SEGMEXEC
105278+ vma_m = pax_find_mirror_vma(vma);
105279+ if (vma_m) {
105280+ unsigned long address_m;
105281+ pgd_t *pgd_m;
105282+ pud_t *pud_m;
105283+ pmd_t *pmd_m;
105284+
105285+ if (vma->vm_start > vma_m->vm_start) {
105286+ address_m = address;
105287+ address -= SEGMEXEC_TASK_SIZE;
105288+ vma = vma_m;
105289+ } else
105290+ address_m = address + SEGMEXEC_TASK_SIZE;
105291+
105292+ pgd_m = pgd_offset(mm, address_m);
105293+ pud_m = pud_alloc(mm, pgd_m, address_m);
105294+ if (!pud_m)
105295+ return VM_FAULT_OOM;
105296+ pmd_m = pmd_alloc(mm, pud_m, address_m);
105297+ if (!pmd_m)
105298+ return VM_FAULT_OOM;
105299+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
105300+ return VM_FAULT_OOM;
105301+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
105302+ }
105303+#endif
105304+
105305 pgd = pgd_offset(mm, address);
105306 pud = pud_alloc(mm, pgd, address);
105307 if (!pud)
105308@@ -3341,6 +3598,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
105309 spin_unlock(&mm->page_table_lock);
105310 return 0;
105311 }
105312+
105313+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
105314+{
105315+ pud_t *new = pud_alloc_one(mm, address);
105316+ if (!new)
105317+ return -ENOMEM;
105318+
105319+ smp_wmb(); /* See comment in __pte_alloc */
105320+
105321+ spin_lock(&mm->page_table_lock);
105322+ if (pgd_present(*pgd)) /* Another has populated it */
105323+ pud_free(mm, new);
105324+ else
105325+ pgd_populate_kernel(mm, pgd, new);
105326+ spin_unlock(&mm->page_table_lock);
105327+ return 0;
105328+}
105329 #endif /* __PAGETABLE_PUD_FOLDED */
105330
105331 #ifndef __PAGETABLE_PMD_FOLDED
105332@@ -3373,6 +3647,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
105333 spin_unlock(&mm->page_table_lock);
105334 return 0;
105335 }
105336+
105337+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
105338+{
105339+ pmd_t *new = pmd_alloc_one(mm, address);
105340+ if (!new)
105341+ return -ENOMEM;
105342+
105343+ smp_wmb(); /* See comment in __pte_alloc */
105344+
105345+ spin_lock(&mm->page_table_lock);
105346+#ifndef __ARCH_HAS_4LEVEL_HACK
105347+ if (!pud_present(*pud)) {
105348+ mm_inc_nr_pmds(mm);
105349+ pud_populate_kernel(mm, pud, new);
105350+ } else /* Another has populated it */
105351+ pmd_free(mm, new);
105352+#else
105353+ if (!pgd_present(*pud)) {
105354+ mm_inc_nr_pmds(mm);
105355+ pgd_populate_kernel(mm, pud, new);
105356+ } else /* Another has populated it */
105357+ pmd_free(mm, new);
105358+#endif /* __ARCH_HAS_4LEVEL_HACK */
105359+ spin_unlock(&mm->page_table_lock);
105360+ return 0;
105361+}
105362 #endif /* __PAGETABLE_PMD_FOLDED */
105363
105364 static int __follow_pte(struct mm_struct *mm, unsigned long address,
105365@@ -3482,8 +3782,8 @@ out:
105366 return ret;
105367 }
105368
105369-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
105370- void *buf, int len, int write)
105371+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
105372+ void *buf, size_t len, int write)
105373 {
105374 resource_size_t phys_addr;
105375 unsigned long prot = 0;
105376@@ -3509,8 +3809,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
105377 * Access another process' address space as given in mm. If non-NULL, use the
105378 * given task for page fault accounting.
105379 */
105380-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105381- unsigned long addr, void *buf, int len, int write)
105382+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105383+ unsigned long addr, void *buf, size_t len, int write)
105384 {
105385 struct vm_area_struct *vma;
105386 void *old_buf = buf;
105387@@ -3518,7 +3818,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105388 down_read(&mm->mmap_sem);
105389 /* ignore errors, just check how much was successfully transferred */
105390 while (len) {
105391- int bytes, ret, offset;
105392+ ssize_t bytes, ret, offset;
105393 void *maddr;
105394 struct page *page = NULL;
105395
105396@@ -3579,8 +3879,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105397 *
105398 * The caller must hold a reference on @mm.
105399 */
105400-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
105401- void *buf, int len, int write)
105402+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
105403+ void *buf, size_t len, int write)
105404 {
105405 return __access_remote_vm(NULL, mm, addr, buf, len, write);
105406 }
105407@@ -3590,11 +3890,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
105408 * Source/target buffer must be kernel space,
105409 * Do not walk the page table directly, use get_user_pages
105410 */
105411-int access_process_vm(struct task_struct *tsk, unsigned long addr,
105412- void *buf, int len, int write)
105413+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
105414+ void *buf, size_t len, int write)
105415 {
105416 struct mm_struct *mm;
105417- int ret;
105418+ ssize_t ret;
105419
105420 mm = get_task_mm(tsk);
105421 if (!mm)
105422diff --git a/mm/mempolicy.c b/mm/mempolicy.c
105423index de5dc5e..68a4ea3 100644
105424--- a/mm/mempolicy.c
105425+++ b/mm/mempolicy.c
105426@@ -703,6 +703,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
105427 unsigned long vmstart;
105428 unsigned long vmend;
105429
105430+#ifdef CONFIG_PAX_SEGMEXEC
105431+ struct vm_area_struct *vma_m;
105432+#endif
105433+
105434 vma = find_vma(mm, start);
105435 if (!vma || vma->vm_start > start)
105436 return -EFAULT;
105437@@ -746,6 +750,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
105438 err = vma_replace_policy(vma, new_pol);
105439 if (err)
105440 goto out;
105441+
105442+#ifdef CONFIG_PAX_SEGMEXEC
105443+ vma_m = pax_find_mirror_vma(vma);
105444+ if (vma_m) {
105445+ err = vma_replace_policy(vma_m, new_pol);
105446+ if (err)
105447+ goto out;
105448+ }
105449+#endif
105450+
105451 }
105452
105453 out:
105454@@ -1160,6 +1174,17 @@ static long do_mbind(unsigned long start, unsigned long len,
105455
105456 if (end < start)
105457 return -EINVAL;
105458+
105459+#ifdef CONFIG_PAX_SEGMEXEC
105460+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
105461+ if (end > SEGMEXEC_TASK_SIZE)
105462+ return -EINVAL;
105463+ } else
105464+#endif
105465+
105466+ if (end > TASK_SIZE)
105467+ return -EINVAL;
105468+
105469 if (end == start)
105470 return 0;
105471
105472@@ -1385,8 +1410,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
105473 */
105474 tcred = __task_cred(task);
105475 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
105476- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
105477- !capable(CAP_SYS_NICE)) {
105478+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
105479 rcu_read_unlock();
105480 err = -EPERM;
105481 goto out_put;
105482@@ -1417,6 +1441,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
105483 goto out;
105484 }
105485
105486+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
105487+ if (mm != current->mm &&
105488+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
105489+ mmput(mm);
105490+ err = -EPERM;
105491+ goto out;
105492+ }
105493+#endif
105494+
105495 err = do_migrate_pages(mm, old, new,
105496 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
105497
105498diff --git a/mm/migrate.c b/mm/migrate.c
105499index 85e0426..be49beb 100644
105500--- a/mm/migrate.c
105501+++ b/mm/migrate.c
105502@@ -1472,8 +1472,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
105503 */
105504 tcred = __task_cred(task);
105505 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
105506- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
105507- !capable(CAP_SYS_NICE)) {
105508+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
105509 rcu_read_unlock();
105510 err = -EPERM;
105511 goto out;
105512diff --git a/mm/mlock.c b/mm/mlock.c
105513index 8a54cd2..92f1747 100644
105514--- a/mm/mlock.c
105515+++ b/mm/mlock.c
105516@@ -14,6 +14,7 @@
105517 #include <linux/pagevec.h>
105518 #include <linux/mempolicy.h>
105519 #include <linux/syscalls.h>
105520+#include <linux/security.h>
105521 #include <linux/sched.h>
105522 #include <linux/export.h>
105523 #include <linux/rmap.h>
105524@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
105525 {
105526 unsigned long nstart, end, tmp;
105527 struct vm_area_struct * vma, * prev;
105528- int error;
105529+ int error = 0;
105530
105531 VM_BUG_ON(start & ~PAGE_MASK);
105532 VM_BUG_ON(len != PAGE_ALIGN(len));
105533@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
105534 return -EINVAL;
105535 if (end == start)
105536 return 0;
105537+ if (end > TASK_SIZE)
105538+ return -EINVAL;
105539+
105540 vma = find_vma(current->mm, start);
105541 if (!vma || vma->vm_start > start)
105542 return -ENOMEM;
105543@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
105544 for (nstart = start ; ; ) {
105545 vm_flags_t newflags;
105546
105547+#ifdef CONFIG_PAX_SEGMEXEC
105548+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
105549+ break;
105550+#endif
105551+
105552 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
105553
105554 newflags = vma->vm_flags & ~VM_LOCKED;
105555@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
105556 locked += current->mm->locked_vm;
105557
105558 /* check against resource limits */
105559+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
105560 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
105561 error = do_mlock(start, len, 1);
105562
105563@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
105564 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
105565 vm_flags_t newflags;
105566
105567+#ifdef CONFIG_PAX_SEGMEXEC
105568+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
105569+ break;
105570+#endif
105571+
105572 newflags = vma->vm_flags & ~VM_LOCKED;
105573 if (flags & MCL_CURRENT)
105574 newflags |= VM_LOCKED;
105575@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
105576 lock_limit >>= PAGE_SHIFT;
105577
105578 ret = -ENOMEM;
105579+
105580+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
105581+
105582 down_write(&current->mm->mmap_sem);
105583-
105584 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
105585 capable(CAP_IPC_LOCK))
105586 ret = do_mlockall(flags);
105587diff --git a/mm/mm_init.c b/mm/mm_init.c
105588index 5f420f7..dd42fb1b 100644
105589--- a/mm/mm_init.c
105590+++ b/mm/mm_init.c
105591@@ -177,7 +177,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
105592 return NOTIFY_OK;
105593 }
105594
105595-static struct notifier_block compute_batch_nb __meminitdata = {
105596+static struct notifier_block compute_batch_nb __meminitconst = {
105597 .notifier_call = mm_compute_batch_notifier,
105598 .priority = IPC_CALLBACK_PRI, /* use lowest priority */
105599 };
105600diff --git a/mm/mmap.c b/mm/mmap.c
105601index 9ec50a3..0476e2d 100644
105602--- a/mm/mmap.c
105603+++ b/mm/mmap.c
105604@@ -41,6 +41,7 @@
105605 #include <linux/notifier.h>
105606 #include <linux/memory.h>
105607 #include <linux/printk.h>
105608+#include <linux/random.h>
105609
105610 #include <asm/uaccess.h>
105611 #include <asm/cacheflush.h>
105612@@ -57,6 +58,16 @@
105613 #define arch_rebalance_pgtables(addr, len) (addr)
105614 #endif
105615
105616+static inline void verify_mm_writelocked(struct mm_struct *mm)
105617+{
105618+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
105619+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
105620+ up_read(&mm->mmap_sem);
105621+ BUG();
105622+ }
105623+#endif
105624+}
105625+
105626 static void unmap_region(struct mm_struct *mm,
105627 struct vm_area_struct *vma, struct vm_area_struct *prev,
105628 unsigned long start, unsigned long end);
105629@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
105630 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
105631 *
105632 */
105633-pgprot_t protection_map[16] = {
105634+pgprot_t protection_map[16] __read_only = {
105635 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
105636 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
105637 };
105638
105639-pgprot_t vm_get_page_prot(unsigned long vm_flags)
105640+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
105641 {
105642- return __pgprot(pgprot_val(protection_map[vm_flags &
105643+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
105644 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
105645 pgprot_val(arch_vm_get_page_prot(vm_flags)));
105646+
105647+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
105648+ if (!(__supported_pte_mask & _PAGE_NX) &&
105649+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
105650+ (vm_flags & (VM_READ | VM_WRITE)))
105651+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
105652+#endif
105653+
105654+ return prot;
105655 }
105656 EXPORT_SYMBOL(vm_get_page_prot);
105657
105658@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
105659 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
105660 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
105661 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
105662+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
105663 /*
105664 * Make sure vm_committed_as in one cacheline and not cacheline shared with
105665 * other variables. It can be updated by several CPUs frequently.
105666@@ -271,6 +292,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
105667 struct vm_area_struct *next = vma->vm_next;
105668
105669 might_sleep();
105670+ BUG_ON(vma->vm_mirror);
105671 if (vma->vm_ops && vma->vm_ops->close)
105672 vma->vm_ops->close(vma);
105673 if (vma->vm_file)
105674@@ -284,6 +306,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
105675
105676 SYSCALL_DEFINE1(brk, unsigned long, brk)
105677 {
105678+ unsigned long rlim;
105679 unsigned long retval;
105680 unsigned long newbrk, oldbrk;
105681 struct mm_struct *mm = current->mm;
105682@@ -314,7 +337,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
105683 * segment grow beyond its set limit the in case where the limit is
105684 * not page aligned -Ram Gupta
105685 */
105686- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
105687+ rlim = rlimit(RLIMIT_DATA);
105688+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
105689+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
105690+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
105691+ rlim = 4096 * PAGE_SIZE;
105692+#endif
105693+ if (check_data_rlimit(rlim, brk, mm->start_brk,
105694 mm->end_data, mm->start_data))
105695 goto out;
105696
105697@@ -967,6 +996,12 @@ static int
105698 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
105699 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
105700 {
105701+
105702+#ifdef CONFIG_PAX_SEGMEXEC
105703+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
105704+ return 0;
105705+#endif
105706+
105707 if (is_mergeable_vma(vma, file, vm_flags) &&
105708 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
105709 if (vma->vm_pgoff == vm_pgoff)
105710@@ -986,6 +1021,12 @@ static int
105711 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
105712 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
105713 {
105714+
105715+#ifdef CONFIG_PAX_SEGMEXEC
105716+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
105717+ return 0;
105718+#endif
105719+
105720 if (is_mergeable_vma(vma, file, vm_flags) &&
105721 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
105722 pgoff_t vm_pglen;
105723@@ -1035,6 +1076,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105724 struct vm_area_struct *area, *next;
105725 int err;
105726
105727+#ifdef CONFIG_PAX_SEGMEXEC
105728+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
105729+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
105730+
105731+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
105732+#endif
105733+
105734 /*
105735 * We later require that vma->vm_flags == vm_flags,
105736 * so this tests vma->vm_flags & VM_SPECIAL, too.
105737@@ -1050,6 +1098,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105738 if (next && next->vm_end == end) /* cases 6, 7, 8 */
105739 next = next->vm_next;
105740
105741+#ifdef CONFIG_PAX_SEGMEXEC
105742+ if (prev)
105743+ prev_m = pax_find_mirror_vma(prev);
105744+ if (area)
105745+ area_m = pax_find_mirror_vma(area);
105746+ if (next)
105747+ next_m = pax_find_mirror_vma(next);
105748+#endif
105749+
105750 /*
105751 * Can it merge with the predecessor?
105752 */
105753@@ -1069,9 +1126,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105754 /* cases 1, 6 */
105755 err = vma_adjust(prev, prev->vm_start,
105756 next->vm_end, prev->vm_pgoff, NULL);
105757- } else /* cases 2, 5, 7 */
105758+
105759+#ifdef CONFIG_PAX_SEGMEXEC
105760+ if (!err && prev_m)
105761+ err = vma_adjust(prev_m, prev_m->vm_start,
105762+ next_m->vm_end, prev_m->vm_pgoff, NULL);
105763+#endif
105764+
105765+ } else { /* cases 2, 5, 7 */
105766 err = vma_adjust(prev, prev->vm_start,
105767 end, prev->vm_pgoff, NULL);
105768+
105769+#ifdef CONFIG_PAX_SEGMEXEC
105770+ if (!err && prev_m)
105771+ err = vma_adjust(prev_m, prev_m->vm_start,
105772+ end_m, prev_m->vm_pgoff, NULL);
105773+#endif
105774+
105775+ }
105776 if (err)
105777 return NULL;
105778 khugepaged_enter_vma_merge(prev, vm_flags);
105779@@ -1085,12 +1157,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105780 mpol_equal(policy, vma_policy(next)) &&
105781 can_vma_merge_before(next, vm_flags,
105782 anon_vma, file, pgoff+pglen)) {
105783- if (prev && addr < prev->vm_end) /* case 4 */
105784+ if (prev && addr < prev->vm_end) { /* case 4 */
105785 err = vma_adjust(prev, prev->vm_start,
105786 addr, prev->vm_pgoff, NULL);
105787- else /* cases 3, 8 */
105788+
105789+#ifdef CONFIG_PAX_SEGMEXEC
105790+ if (!err && prev_m)
105791+ err = vma_adjust(prev_m, prev_m->vm_start,
105792+ addr_m, prev_m->vm_pgoff, NULL);
105793+#endif
105794+
105795+ } else { /* cases 3, 8 */
105796 err = vma_adjust(area, addr, next->vm_end,
105797 next->vm_pgoff - pglen, NULL);
105798+
105799+#ifdef CONFIG_PAX_SEGMEXEC
105800+ if (!err && area_m)
105801+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
105802+ next_m->vm_pgoff - pglen, NULL);
105803+#endif
105804+
105805+ }
105806 if (err)
105807 return NULL;
105808 khugepaged_enter_vma_merge(area, vm_flags);
105809@@ -1199,8 +1286,10 @@ none:
105810 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
105811 struct file *file, long pages)
105812 {
105813- const unsigned long stack_flags
105814- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
105815+
105816+#ifdef CONFIG_PAX_RANDMMAP
105817+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
105818+#endif
105819
105820 mm->total_vm += pages;
105821
105822@@ -1208,7 +1297,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
105823 mm->shared_vm += pages;
105824 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
105825 mm->exec_vm += pages;
105826- } else if (flags & stack_flags)
105827+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
105828 mm->stack_vm += pages;
105829 }
105830 #endif /* CONFIG_PROC_FS */
105831@@ -1238,6 +1327,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
105832 locked += mm->locked_vm;
105833 lock_limit = rlimit(RLIMIT_MEMLOCK);
105834 lock_limit >>= PAGE_SHIFT;
105835+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
105836 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
105837 return -EAGAIN;
105838 }
105839@@ -1264,7 +1354,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
105840 * (the exception is when the underlying filesystem is noexec
105841 * mounted, in which case we dont add PROT_EXEC.)
105842 */
105843- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
105844+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
105845 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
105846 prot |= PROT_EXEC;
105847
105848@@ -1290,7 +1380,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
105849 /* Obtain the address to map to. we verify (or select) it and ensure
105850 * that it represents a valid section of the address space.
105851 */
105852- addr = get_unmapped_area(file, addr, len, pgoff, flags);
105853+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
105854 if (addr & ~PAGE_MASK)
105855 return addr;
105856
105857@@ -1301,6 +1391,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
105858 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
105859 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
105860
105861+#ifdef CONFIG_PAX_MPROTECT
105862+ if (mm->pax_flags & MF_PAX_MPROTECT) {
105863+
105864+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
105865+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
105866+ mm->binfmt->handle_mmap)
105867+ mm->binfmt->handle_mmap(file);
105868+#endif
105869+
105870+#ifndef CONFIG_PAX_MPROTECT_COMPAT
105871+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
105872+ gr_log_rwxmmap(file);
105873+
105874+#ifdef CONFIG_PAX_EMUPLT
105875+ vm_flags &= ~VM_EXEC;
105876+#else
105877+ return -EPERM;
105878+#endif
105879+
105880+ }
105881+
105882+ if (!(vm_flags & VM_EXEC))
105883+ vm_flags &= ~VM_MAYEXEC;
105884+#else
105885+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
105886+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
105887+#endif
105888+ else
105889+ vm_flags &= ~VM_MAYWRITE;
105890+ }
105891+#endif
105892+
105893+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
105894+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
105895+ vm_flags &= ~VM_PAGEEXEC;
105896+#endif
105897+
105898 if (flags & MAP_LOCKED)
105899 if (!can_do_mlock())
105900 return -EPERM;
105901@@ -1388,6 +1515,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
105902 vm_flags |= VM_NORESERVE;
105903 }
105904
105905+ if (!gr_acl_handle_mmap(file, prot))
105906+ return -EACCES;
105907+
105908 addr = mmap_region(file, addr, len, vm_flags, pgoff);
105909 if (!IS_ERR_VALUE(addr) &&
105910 ((vm_flags & VM_LOCKED) ||
105911@@ -1481,7 +1611,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
105912 vm_flags_t vm_flags = vma->vm_flags;
105913
105914 /* If it was private or non-writable, the write bit is already clear */
105915- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
105916+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
105917 return 0;
105918
105919 /* The backer wishes to know when pages are first written to? */
105920@@ -1532,7 +1662,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
105921 struct rb_node **rb_link, *rb_parent;
105922 unsigned long charged = 0;
105923
105924+#ifdef CONFIG_PAX_SEGMEXEC
105925+ struct vm_area_struct *vma_m = NULL;
105926+#endif
105927+
105928+ /*
105929+ * mm->mmap_sem is required to protect against another thread
105930+ * changing the mappings in case we sleep.
105931+ */
105932+ verify_mm_writelocked(mm);
105933+
105934 /* Check against address space limit. */
105935+
105936+#ifdef CONFIG_PAX_RANDMMAP
105937+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
105938+#endif
105939+
105940 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
105941 unsigned long nr_pages;
105942
105943@@ -1551,11 +1696,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
105944
105945 /* Clear old maps */
105946 error = -ENOMEM;
105947-munmap_back:
105948 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
105949 if (do_munmap(mm, addr, len))
105950 return -ENOMEM;
105951- goto munmap_back;
105952+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
105953 }
105954
105955 /*
105956@@ -1586,6 +1730,16 @@ munmap_back:
105957 goto unacct_error;
105958 }
105959
105960+#ifdef CONFIG_PAX_SEGMEXEC
105961+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
105962+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
105963+ if (!vma_m) {
105964+ error = -ENOMEM;
105965+ goto free_vma;
105966+ }
105967+ }
105968+#endif
105969+
105970 vma->vm_mm = mm;
105971 vma->vm_start = addr;
105972 vma->vm_end = addr + len;
105973@@ -1616,6 +1770,13 @@ munmap_back:
105974 if (error)
105975 goto unmap_and_free_vma;
105976
105977+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
105978+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
105979+ vma->vm_flags |= VM_PAGEEXEC;
105980+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
105981+ }
105982+#endif
105983+
105984 /* Can addr have changed??
105985 *
105986 * Answer: Yes, several device drivers can do it in their
105987@@ -1634,6 +1795,12 @@ munmap_back:
105988 }
105989
105990 vma_link(mm, vma, prev, rb_link, rb_parent);
105991+
105992+#ifdef CONFIG_PAX_SEGMEXEC
105993+ if (vma_m)
105994+ BUG_ON(pax_mirror_vma(vma_m, vma));
105995+#endif
105996+
105997 /* Once vma denies write, undo our temporary denial count */
105998 if (file) {
105999 if (vm_flags & VM_SHARED)
106000@@ -1646,6 +1813,7 @@ out:
106001 perf_event_mmap(vma);
106002
106003 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
106004+ track_exec_limit(mm, addr, addr + len, vm_flags);
106005 if (vm_flags & VM_LOCKED) {
106006 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
106007 vma == get_gate_vma(current->mm)))
106008@@ -1683,6 +1851,12 @@ allow_write_and_free_vma:
106009 if (vm_flags & VM_DENYWRITE)
106010 allow_write_access(file);
106011 free_vma:
106012+
106013+#ifdef CONFIG_PAX_SEGMEXEC
106014+ if (vma_m)
106015+ kmem_cache_free(vm_area_cachep, vma_m);
106016+#endif
106017+
106018 kmem_cache_free(vm_area_cachep, vma);
106019 unacct_error:
106020 if (charged)
106021@@ -1690,7 +1864,63 @@ unacct_error:
106022 return error;
106023 }
106024
106025-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
106026+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
106027+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
106028+{
106029+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
106030+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
106031+
106032+ return 0;
106033+}
106034+#endif
106035+
106036+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
106037+{
106038+ if (!vma) {
106039+#ifdef CONFIG_STACK_GROWSUP
106040+ if (addr > sysctl_heap_stack_gap)
106041+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
106042+ else
106043+ vma = find_vma(current->mm, 0);
106044+ if (vma && (vma->vm_flags & VM_GROWSUP))
106045+ return false;
106046+#endif
106047+ return true;
106048+ }
106049+
106050+ if (addr + len > vma->vm_start)
106051+ return false;
106052+
106053+ if (vma->vm_flags & VM_GROWSDOWN)
106054+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
106055+#ifdef CONFIG_STACK_GROWSUP
106056+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
106057+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
106058+#endif
106059+ else if (offset)
106060+ return offset <= vma->vm_start - addr - len;
106061+
106062+ return true;
106063+}
106064+
106065+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
106066+{
106067+ if (vma->vm_start < len)
106068+ return -ENOMEM;
106069+
106070+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
106071+ if (offset <= vma->vm_start - len)
106072+ return vma->vm_start - len - offset;
106073+ else
106074+ return -ENOMEM;
106075+ }
106076+
106077+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
106078+ return vma->vm_start - len - sysctl_heap_stack_gap;
106079+ return -ENOMEM;
106080+}
106081+
106082+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
106083 {
106084 /*
106085 * We implement the search by looking for an rbtree node that
106086@@ -1738,11 +1968,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
106087 }
106088 }
106089
106090- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
106091+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
106092 check_current:
106093 /* Check if current node has a suitable gap */
106094 if (gap_start > high_limit)
106095 return -ENOMEM;
106096+
106097+ if (gap_end - gap_start > info->threadstack_offset)
106098+ gap_start += info->threadstack_offset;
106099+ else
106100+ gap_start = gap_end;
106101+
106102+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
106103+ if (gap_end - gap_start > sysctl_heap_stack_gap)
106104+ gap_start += sysctl_heap_stack_gap;
106105+ else
106106+ gap_start = gap_end;
106107+ }
106108+ if (vma->vm_flags & VM_GROWSDOWN) {
106109+ if (gap_end - gap_start > sysctl_heap_stack_gap)
106110+ gap_end -= sysctl_heap_stack_gap;
106111+ else
106112+ gap_end = gap_start;
106113+ }
106114 if (gap_end >= low_limit && gap_end - gap_start >= length)
106115 goto found;
106116
106117@@ -1792,7 +2040,7 @@ found:
106118 return gap_start;
106119 }
106120
106121-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
106122+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
106123 {
106124 struct mm_struct *mm = current->mm;
106125 struct vm_area_struct *vma;
106126@@ -1846,6 +2094,24 @@ check_current:
106127 gap_end = vma->vm_start;
106128 if (gap_end < low_limit)
106129 return -ENOMEM;
106130+
106131+ if (gap_end - gap_start > info->threadstack_offset)
106132+ gap_end -= info->threadstack_offset;
106133+ else
106134+ gap_end = gap_start;
106135+
106136+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
106137+ if (gap_end - gap_start > sysctl_heap_stack_gap)
106138+ gap_start += sysctl_heap_stack_gap;
106139+ else
106140+ gap_start = gap_end;
106141+ }
106142+ if (vma->vm_flags & VM_GROWSDOWN) {
106143+ if (gap_end - gap_start > sysctl_heap_stack_gap)
106144+ gap_end -= sysctl_heap_stack_gap;
106145+ else
106146+ gap_end = gap_start;
106147+ }
106148 if (gap_start <= high_limit && gap_end - gap_start >= length)
106149 goto found;
106150
106151@@ -1909,6 +2175,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
106152 struct mm_struct *mm = current->mm;
106153 struct vm_area_struct *vma;
106154 struct vm_unmapped_area_info info;
106155+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
106156
106157 if (len > TASK_SIZE - mmap_min_addr)
106158 return -ENOMEM;
106159@@ -1916,11 +2183,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
106160 if (flags & MAP_FIXED)
106161 return addr;
106162
106163+#ifdef CONFIG_PAX_RANDMMAP
106164+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
106165+#endif
106166+
106167 if (addr) {
106168 addr = PAGE_ALIGN(addr);
106169 vma = find_vma(mm, addr);
106170 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
106171- (!vma || addr + len <= vma->vm_start))
106172+ check_heap_stack_gap(vma, addr, len, offset))
106173 return addr;
106174 }
106175
106176@@ -1929,6 +2200,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
106177 info.low_limit = mm->mmap_base;
106178 info.high_limit = TASK_SIZE;
106179 info.align_mask = 0;
106180+ info.threadstack_offset = offset;
106181 return vm_unmapped_area(&info);
106182 }
106183 #endif
106184@@ -1947,6 +2219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
106185 struct mm_struct *mm = current->mm;
106186 unsigned long addr = addr0;
106187 struct vm_unmapped_area_info info;
106188+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
106189
106190 /* requested length too big for entire address space */
106191 if (len > TASK_SIZE - mmap_min_addr)
106192@@ -1955,12 +2228,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
106193 if (flags & MAP_FIXED)
106194 return addr;
106195
106196+#ifdef CONFIG_PAX_RANDMMAP
106197+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
106198+#endif
106199+
106200 /* requesting a specific address */
106201 if (addr) {
106202 addr = PAGE_ALIGN(addr);
106203 vma = find_vma(mm, addr);
106204 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
106205- (!vma || addr + len <= vma->vm_start))
106206+ check_heap_stack_gap(vma, addr, len, offset))
106207 return addr;
106208 }
106209
106210@@ -1969,6 +2246,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
106211 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
106212 info.high_limit = mm->mmap_base;
106213 info.align_mask = 0;
106214+ info.threadstack_offset = offset;
106215 addr = vm_unmapped_area(&info);
106216
106217 /*
106218@@ -1981,6 +2259,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
106219 VM_BUG_ON(addr != -ENOMEM);
106220 info.flags = 0;
106221 info.low_limit = TASK_UNMAPPED_BASE;
106222+
106223+#ifdef CONFIG_PAX_RANDMMAP
106224+ if (mm->pax_flags & MF_PAX_RANDMMAP)
106225+ info.low_limit += mm->delta_mmap;
106226+#endif
106227+
106228 info.high_limit = TASK_SIZE;
106229 addr = vm_unmapped_area(&info);
106230 }
106231@@ -2081,6 +2365,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
106232 return vma;
106233 }
106234
106235+#ifdef CONFIG_PAX_SEGMEXEC
106236+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
106237+{
106238+ struct vm_area_struct *vma_m;
106239+
106240+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
106241+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
106242+ BUG_ON(vma->vm_mirror);
106243+ return NULL;
106244+ }
106245+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
106246+ vma_m = vma->vm_mirror;
106247+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
106248+ BUG_ON(vma->vm_file != vma_m->vm_file);
106249+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
106250+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
106251+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
106252+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
106253+ return vma_m;
106254+}
106255+#endif
106256+
106257 /*
106258 * Verify that the stack growth is acceptable and
106259 * update accounting. This is shared with both the
106260@@ -2098,8 +2404,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
106261
106262 /* Stack limit test */
106263 actual_size = size;
106264- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
106265- actual_size -= PAGE_SIZE;
106266+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
106267 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
106268 return -ENOMEM;
106269
106270@@ -2110,6 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
106271 locked = mm->locked_vm + grow;
106272 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
106273 limit >>= PAGE_SHIFT;
106274+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
106275 if (locked > limit && !capable(CAP_IPC_LOCK))
106276 return -ENOMEM;
106277 }
106278@@ -2139,37 +2445,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
106279 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
106280 * vma is the last one with address > vma->vm_end. Have to extend vma.
106281 */
106282+#ifndef CONFIG_IA64
106283+static
106284+#endif
106285 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
106286 {
106287 int error;
106288+ bool locknext;
106289
106290 if (!(vma->vm_flags & VM_GROWSUP))
106291 return -EFAULT;
106292
106293+ /* Also guard against wrapping around to address 0. */
106294+ if (address < PAGE_ALIGN(address+1))
106295+ address = PAGE_ALIGN(address+1);
106296+ else
106297+ return -ENOMEM;
106298+
106299 /*
106300 * We must make sure the anon_vma is allocated
106301 * so that the anon_vma locking is not a noop.
106302 */
106303 if (unlikely(anon_vma_prepare(vma)))
106304 return -ENOMEM;
106305+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
106306+ if (locknext && anon_vma_prepare(vma->vm_next))
106307+ return -ENOMEM;
106308 vma_lock_anon_vma(vma);
106309+ if (locknext)
106310+ vma_lock_anon_vma(vma->vm_next);
106311
106312 /*
106313 * vma->vm_start/vm_end cannot change under us because the caller
106314 * is required to hold the mmap_sem in read mode. We need the
106315- * anon_vma lock to serialize against concurrent expand_stacks.
106316- * Also guard against wrapping around to address 0.
106317+ * anon_vma locks to serialize against concurrent expand_stacks
106318+ * and expand_upwards.
106319 */
106320- if (address < PAGE_ALIGN(address+4))
106321- address = PAGE_ALIGN(address+4);
106322- else {
106323- vma_unlock_anon_vma(vma);
106324- return -ENOMEM;
106325- }
106326 error = 0;
106327
106328 /* Somebody else might have raced and expanded it already */
106329- if (address > vma->vm_end) {
106330+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
106331+ error = -ENOMEM;
106332+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
106333 unsigned long size, grow;
106334
106335 size = address - vma->vm_start;
106336@@ -2204,6 +2521,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
106337 }
106338 }
106339 }
106340+ if (locknext)
106341+ vma_unlock_anon_vma(vma->vm_next);
106342 vma_unlock_anon_vma(vma);
106343 khugepaged_enter_vma_merge(vma, vma->vm_flags);
106344 validate_mm(vma->vm_mm);
106345@@ -2218,6 +2537,8 @@ int expand_downwards(struct vm_area_struct *vma,
106346 unsigned long address)
106347 {
106348 int error;
106349+ bool lockprev = false;
106350+ struct vm_area_struct *prev;
106351
106352 /*
106353 * We must make sure the anon_vma is allocated
106354@@ -2231,6 +2552,15 @@ int expand_downwards(struct vm_area_struct *vma,
106355 if (error)
106356 return error;
106357
106358+ prev = vma->vm_prev;
106359+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
106360+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
106361+#endif
106362+ if (lockprev && anon_vma_prepare(prev))
106363+ return -ENOMEM;
106364+ if (lockprev)
106365+ vma_lock_anon_vma(prev);
106366+
106367 vma_lock_anon_vma(vma);
106368
106369 /*
106370@@ -2240,9 +2570,17 @@ int expand_downwards(struct vm_area_struct *vma,
106371 */
106372
106373 /* Somebody else might have raced and expanded it already */
106374- if (address < vma->vm_start) {
106375+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
106376+ error = -ENOMEM;
106377+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
106378 unsigned long size, grow;
106379
106380+#ifdef CONFIG_PAX_SEGMEXEC
106381+ struct vm_area_struct *vma_m;
106382+
106383+ vma_m = pax_find_mirror_vma(vma);
106384+#endif
106385+
106386 size = vma->vm_end - address;
106387 grow = (vma->vm_start - address) >> PAGE_SHIFT;
106388
106389@@ -2267,13 +2605,27 @@ int expand_downwards(struct vm_area_struct *vma,
106390 vma->vm_pgoff -= grow;
106391 anon_vma_interval_tree_post_update_vma(vma);
106392 vma_gap_update(vma);
106393+
106394+#ifdef CONFIG_PAX_SEGMEXEC
106395+ if (vma_m) {
106396+ anon_vma_interval_tree_pre_update_vma(vma_m);
106397+ vma_m->vm_start -= grow << PAGE_SHIFT;
106398+ vma_m->vm_pgoff -= grow;
106399+ anon_vma_interval_tree_post_update_vma(vma_m);
106400+ vma_gap_update(vma_m);
106401+ }
106402+#endif
106403+
106404 spin_unlock(&vma->vm_mm->page_table_lock);
106405
106406+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
106407 perf_event_mmap(vma);
106408 }
106409 }
106410 }
106411 vma_unlock_anon_vma(vma);
106412+ if (lockprev)
106413+ vma_unlock_anon_vma(prev);
106414 khugepaged_enter_vma_merge(vma, vma->vm_flags);
106415 validate_mm(vma->vm_mm);
106416 return error;
106417@@ -2373,6 +2725,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
106418 do {
106419 long nrpages = vma_pages(vma);
106420
106421+#ifdef CONFIG_PAX_SEGMEXEC
106422+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
106423+ vma = remove_vma(vma);
106424+ continue;
106425+ }
106426+#endif
106427+
106428 if (vma->vm_flags & VM_ACCOUNT)
106429 nr_accounted += nrpages;
106430 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
106431@@ -2417,6 +2776,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
106432 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
106433 vma->vm_prev = NULL;
106434 do {
106435+
106436+#ifdef CONFIG_PAX_SEGMEXEC
106437+ if (vma->vm_mirror) {
106438+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
106439+ vma->vm_mirror->vm_mirror = NULL;
106440+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
106441+ vma->vm_mirror = NULL;
106442+ }
106443+#endif
106444+
106445 vma_rb_erase(vma, &mm->mm_rb);
106446 mm->map_count--;
106447 tail_vma = vma;
106448@@ -2444,14 +2813,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106449 struct vm_area_struct *new;
106450 int err = -ENOMEM;
106451
106452+#ifdef CONFIG_PAX_SEGMEXEC
106453+ struct vm_area_struct *vma_m, *new_m = NULL;
106454+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
106455+#endif
106456+
106457 if (is_vm_hugetlb_page(vma) && (addr &
106458 ~(huge_page_mask(hstate_vma(vma)))))
106459 return -EINVAL;
106460
106461+#ifdef CONFIG_PAX_SEGMEXEC
106462+ vma_m = pax_find_mirror_vma(vma);
106463+#endif
106464+
106465 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
106466 if (!new)
106467 goto out_err;
106468
106469+#ifdef CONFIG_PAX_SEGMEXEC
106470+ if (vma_m) {
106471+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
106472+ if (!new_m) {
106473+ kmem_cache_free(vm_area_cachep, new);
106474+ goto out_err;
106475+ }
106476+ }
106477+#endif
106478+
106479 /* most fields are the same, copy all, and then fixup */
106480 *new = *vma;
106481
106482@@ -2464,6 +2852,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106483 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
106484 }
106485
106486+#ifdef CONFIG_PAX_SEGMEXEC
106487+ if (vma_m) {
106488+ *new_m = *vma_m;
106489+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
106490+ new_m->vm_mirror = new;
106491+ new->vm_mirror = new_m;
106492+
106493+ if (new_below)
106494+ new_m->vm_end = addr_m;
106495+ else {
106496+ new_m->vm_start = addr_m;
106497+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
106498+ }
106499+ }
106500+#endif
106501+
106502 err = vma_dup_policy(vma, new);
106503 if (err)
106504 goto out_free_vma;
106505@@ -2484,6 +2888,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106506 else
106507 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
106508
106509+#ifdef CONFIG_PAX_SEGMEXEC
106510+ if (!err && vma_m) {
106511+ struct mempolicy *pol = vma_policy(new);
106512+
106513+ if (anon_vma_clone(new_m, vma_m))
106514+ goto out_free_mpol;
106515+
106516+ mpol_get(pol);
106517+ set_vma_policy(new_m, pol);
106518+
106519+ if (new_m->vm_file)
106520+ get_file(new_m->vm_file);
106521+
106522+ if (new_m->vm_ops && new_m->vm_ops->open)
106523+ new_m->vm_ops->open(new_m);
106524+
106525+ if (new_below)
106526+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
106527+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
106528+ else
106529+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
106530+
106531+ if (err) {
106532+ if (new_m->vm_ops && new_m->vm_ops->close)
106533+ new_m->vm_ops->close(new_m);
106534+ if (new_m->vm_file)
106535+ fput(new_m->vm_file);
106536+ mpol_put(pol);
106537+ }
106538+ }
106539+#endif
106540+
106541 /* Success. */
106542 if (!err)
106543 return 0;
106544@@ -2493,10 +2929,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106545 new->vm_ops->close(new);
106546 if (new->vm_file)
106547 fput(new->vm_file);
106548- unlink_anon_vmas(new);
106549 out_free_mpol:
106550 mpol_put(vma_policy(new));
106551 out_free_vma:
106552+
106553+#ifdef CONFIG_PAX_SEGMEXEC
106554+ if (new_m) {
106555+ unlink_anon_vmas(new_m);
106556+ kmem_cache_free(vm_area_cachep, new_m);
106557+ }
106558+#endif
106559+
106560+ unlink_anon_vmas(new);
106561 kmem_cache_free(vm_area_cachep, new);
106562 out_err:
106563 return err;
106564@@ -2509,6 +2953,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106565 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106566 unsigned long addr, int new_below)
106567 {
106568+
106569+#ifdef CONFIG_PAX_SEGMEXEC
106570+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
106571+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
106572+ if (mm->map_count >= sysctl_max_map_count-1)
106573+ return -ENOMEM;
106574+ } else
106575+#endif
106576+
106577 if (mm->map_count >= sysctl_max_map_count)
106578 return -ENOMEM;
106579
106580@@ -2520,11 +2973,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106581 * work. This now handles partial unmappings.
106582 * Jeremy Fitzhardinge <jeremy@goop.org>
106583 */
106584+#ifdef CONFIG_PAX_SEGMEXEC
106585 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106586 {
106587+ int ret = __do_munmap(mm, start, len);
106588+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
106589+ return ret;
106590+
106591+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
106592+}
106593+
106594+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106595+#else
106596+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106597+#endif
106598+{
106599 unsigned long end;
106600 struct vm_area_struct *vma, *prev, *last;
106601
106602+ /*
106603+ * mm->mmap_sem is required to protect against another thread
106604+ * changing the mappings in case we sleep.
106605+ */
106606+ verify_mm_writelocked(mm);
106607+
106608 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
106609 return -EINVAL;
106610
106611@@ -2602,6 +3074,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106612 /* Fix up all other VM information */
106613 remove_vma_list(mm, vma);
106614
106615+ track_exec_limit(mm, start, end, 0UL);
106616+
106617 return 0;
106618 }
106619
106620@@ -2610,6 +3084,13 @@ int vm_munmap(unsigned long start, size_t len)
106621 int ret;
106622 struct mm_struct *mm = current->mm;
106623
106624+
106625+#ifdef CONFIG_PAX_SEGMEXEC
106626+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
106627+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
106628+ return -EINVAL;
106629+#endif
106630+
106631 down_write(&mm->mmap_sem);
106632 ret = do_munmap(mm, start, len);
106633 up_write(&mm->mmap_sem);
106634@@ -2656,6 +3137,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
106635 down_write(&mm->mmap_sem);
106636 vma = find_vma(mm, start);
106637
106638+#ifdef CONFIG_PAX_SEGMEXEC
106639+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
106640+ goto out;
106641+#endif
106642+
106643 if (!vma || !(vma->vm_flags & VM_SHARED))
106644 goto out;
106645
106646@@ -2692,16 +3178,6 @@ out:
106647 return ret;
106648 }
106649
106650-static inline void verify_mm_writelocked(struct mm_struct *mm)
106651-{
106652-#ifdef CONFIG_DEBUG_VM
106653- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
106654- WARN_ON(1);
106655- up_read(&mm->mmap_sem);
106656- }
106657-#endif
106658-}
106659-
106660 /*
106661 * this is really a simplified "do_mmap". it only handles
106662 * anonymous maps. eventually we may be able to do some
106663@@ -2715,6 +3191,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106664 struct rb_node **rb_link, *rb_parent;
106665 pgoff_t pgoff = addr >> PAGE_SHIFT;
106666 int error;
106667+ unsigned long charged;
106668
106669 len = PAGE_ALIGN(len);
106670 if (!len)
106671@@ -2722,10 +3199,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106672
106673 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
106674
106675+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
106676+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
106677+ flags &= ~VM_EXEC;
106678+
106679+#ifdef CONFIG_PAX_MPROTECT
106680+ if (mm->pax_flags & MF_PAX_MPROTECT)
106681+ flags &= ~VM_MAYEXEC;
106682+#endif
106683+
106684+ }
106685+#endif
106686+
106687 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
106688 if (error & ~PAGE_MASK)
106689 return error;
106690
106691+ charged = len >> PAGE_SHIFT;
106692+
106693 error = mlock_future_check(mm, mm->def_flags, len);
106694 if (error)
106695 return error;
106696@@ -2739,21 +3230,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106697 /*
106698 * Clear old maps. this also does some error checking for us
106699 */
106700- munmap_back:
106701 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
106702 if (do_munmap(mm, addr, len))
106703 return -ENOMEM;
106704- goto munmap_back;
106705+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
106706 }
106707
106708 /* Check against address space limits *after* clearing old maps... */
106709- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
106710+ if (!may_expand_vm(mm, charged))
106711 return -ENOMEM;
106712
106713 if (mm->map_count > sysctl_max_map_count)
106714 return -ENOMEM;
106715
106716- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
106717+ if (security_vm_enough_memory_mm(mm, charged))
106718 return -ENOMEM;
106719
106720 /* Can we just expand an old private anonymous mapping? */
106721@@ -2767,7 +3257,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106722 */
106723 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
106724 if (!vma) {
106725- vm_unacct_memory(len >> PAGE_SHIFT);
106726+ vm_unacct_memory(charged);
106727 return -ENOMEM;
106728 }
106729
106730@@ -2781,10 +3271,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106731 vma_link(mm, vma, prev, rb_link, rb_parent);
106732 out:
106733 perf_event_mmap(vma);
106734- mm->total_vm += len >> PAGE_SHIFT;
106735+ mm->total_vm += charged;
106736 if (flags & VM_LOCKED)
106737- mm->locked_vm += (len >> PAGE_SHIFT);
106738+ mm->locked_vm += charged;
106739 vma->vm_flags |= VM_SOFTDIRTY;
106740+ track_exec_limit(mm, addr, addr + len, flags);
106741 return addr;
106742 }
106743
106744@@ -2846,6 +3337,7 @@ void exit_mmap(struct mm_struct *mm)
106745 while (vma) {
106746 if (vma->vm_flags & VM_ACCOUNT)
106747 nr_accounted += vma_pages(vma);
106748+ vma->vm_mirror = NULL;
106749 vma = remove_vma(vma);
106750 }
106751 vm_unacct_memory(nr_accounted);
106752@@ -2860,6 +3352,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
106753 struct vm_area_struct *prev;
106754 struct rb_node **rb_link, *rb_parent;
106755
106756+#ifdef CONFIG_PAX_SEGMEXEC
106757+ struct vm_area_struct *vma_m = NULL;
106758+#endif
106759+
106760+ if (security_mmap_addr(vma->vm_start))
106761+ return -EPERM;
106762+
106763 /*
106764 * The vm_pgoff of a purely anonymous vma should be irrelevant
106765 * until its first write fault, when page's anon_vma and index
106766@@ -2883,7 +3382,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
106767 security_vm_enough_memory_mm(mm, vma_pages(vma)))
106768 return -ENOMEM;
106769
106770+#ifdef CONFIG_PAX_SEGMEXEC
106771+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
106772+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
106773+ if (!vma_m)
106774+ return -ENOMEM;
106775+ }
106776+#endif
106777+
106778 vma_link(mm, vma, prev, rb_link, rb_parent);
106779+
106780+#ifdef CONFIG_PAX_SEGMEXEC
106781+ if (vma_m)
106782+ BUG_ON(pax_mirror_vma(vma_m, vma));
106783+#endif
106784+
106785 return 0;
106786 }
106787
106788@@ -2902,6 +3415,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
106789 struct rb_node **rb_link, *rb_parent;
106790 bool faulted_in_anon_vma = true;
106791
106792+ BUG_ON(vma->vm_mirror);
106793+
106794 /*
106795 * If anonymous vma has not yet been faulted, update new pgoff
106796 * to match new location, to increase its chance of merging.
106797@@ -2966,6 +3481,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
106798 return NULL;
106799 }
106800
106801+#ifdef CONFIG_PAX_SEGMEXEC
106802+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
106803+{
106804+ struct vm_area_struct *prev_m;
106805+ struct rb_node **rb_link_m, *rb_parent_m;
106806+ struct mempolicy *pol_m;
106807+
106808+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
106809+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
106810+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
106811+ *vma_m = *vma;
106812+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
106813+ if (anon_vma_clone(vma_m, vma))
106814+ return -ENOMEM;
106815+ pol_m = vma_policy(vma_m);
106816+ mpol_get(pol_m);
106817+ set_vma_policy(vma_m, pol_m);
106818+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
106819+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
106820+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
106821+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
106822+ if (vma_m->vm_file)
106823+ get_file(vma_m->vm_file);
106824+ if (vma_m->vm_ops && vma_m->vm_ops->open)
106825+ vma_m->vm_ops->open(vma_m);
106826+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
106827+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
106828+ vma_m->vm_mirror = vma;
106829+ vma->vm_mirror = vma_m;
106830+ return 0;
106831+}
106832+#endif
106833+
106834 /*
106835 * Return true if the calling process may expand its vm space by the passed
106836 * number of pages
106837@@ -2977,6 +3525,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
106838
106839 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
106840
106841+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
106842 if (cur + npages > lim)
106843 return 0;
106844 return 1;
106845@@ -3059,6 +3608,22 @@ static struct vm_area_struct *__install_special_mapping(
106846 vma->vm_start = addr;
106847 vma->vm_end = addr + len;
106848
106849+#ifdef CONFIG_PAX_MPROTECT
106850+ if (mm->pax_flags & MF_PAX_MPROTECT) {
106851+#ifndef CONFIG_PAX_MPROTECT_COMPAT
106852+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
106853+ return ERR_PTR(-EPERM);
106854+ if (!(vm_flags & VM_EXEC))
106855+ vm_flags &= ~VM_MAYEXEC;
106856+#else
106857+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
106858+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
106859+#endif
106860+ else
106861+ vm_flags &= ~VM_MAYWRITE;
106862+ }
106863+#endif
106864+
106865 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
106866 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
106867
106868diff --git a/mm/mprotect.c b/mm/mprotect.c
106869index 8858483..8145fa5 100644
106870--- a/mm/mprotect.c
106871+++ b/mm/mprotect.c
106872@@ -24,10 +24,18 @@
106873 #include <linux/migrate.h>
106874 #include <linux/perf_event.h>
106875 #include <linux/ksm.h>
106876+#include <linux/sched/sysctl.h>
106877+
106878+#ifdef CONFIG_PAX_MPROTECT
106879+#include <linux/elf.h>
106880+#include <linux/binfmts.h>
106881+#endif
106882+
106883 #include <asm/uaccess.h>
106884 #include <asm/pgtable.h>
106885 #include <asm/cacheflush.h>
106886 #include <asm/tlbflush.h>
106887+#include <asm/mmu_context.h>
106888
106889 /*
106890 * For a prot_numa update we only hold mmap_sem for read so there is a
106891@@ -252,6 +260,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
106892 return pages;
106893 }
106894
106895+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
106896+/* called while holding the mmap semaphor for writing except stack expansion */
106897+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
106898+{
106899+ unsigned long oldlimit, newlimit = 0UL;
106900+
106901+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
106902+ return;
106903+
106904+ spin_lock(&mm->page_table_lock);
106905+ oldlimit = mm->context.user_cs_limit;
106906+ if ((prot & VM_EXEC) && oldlimit < end)
106907+ /* USER_CS limit moved up */
106908+ newlimit = end;
106909+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
106910+ /* USER_CS limit moved down */
106911+ newlimit = start;
106912+
106913+ if (newlimit) {
106914+ mm->context.user_cs_limit = newlimit;
106915+
106916+#ifdef CONFIG_SMP
106917+ wmb();
106918+ cpus_clear(mm->context.cpu_user_cs_mask);
106919+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
106920+#endif
106921+
106922+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
106923+ }
106924+ spin_unlock(&mm->page_table_lock);
106925+ if (newlimit == end) {
106926+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
106927+
106928+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
106929+ if (is_vm_hugetlb_page(vma))
106930+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
106931+ else
106932+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
106933+ }
106934+}
106935+#endif
106936+
106937 int
106938 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
106939 unsigned long start, unsigned long end, unsigned long newflags)
106940@@ -264,11 +314,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
106941 int error;
106942 int dirty_accountable = 0;
106943
106944+#ifdef CONFIG_PAX_SEGMEXEC
106945+ struct vm_area_struct *vma_m = NULL;
106946+ unsigned long start_m, end_m;
106947+
106948+ start_m = start + SEGMEXEC_TASK_SIZE;
106949+ end_m = end + SEGMEXEC_TASK_SIZE;
106950+#endif
106951+
106952 if (newflags == oldflags) {
106953 *pprev = vma;
106954 return 0;
106955 }
106956
106957+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
106958+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
106959+
106960+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
106961+ return -ENOMEM;
106962+
106963+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
106964+ return -ENOMEM;
106965+ }
106966+
106967 /*
106968 * If we make a private mapping writable we increase our commit;
106969 * but (without finer accounting) cannot reduce our commit if we
106970@@ -285,6 +353,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
106971 }
106972 }
106973
106974+#ifdef CONFIG_PAX_SEGMEXEC
106975+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
106976+ if (start != vma->vm_start) {
106977+ error = split_vma(mm, vma, start, 1);
106978+ if (error)
106979+ goto fail;
106980+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
106981+ *pprev = (*pprev)->vm_next;
106982+ }
106983+
106984+ if (end != vma->vm_end) {
106985+ error = split_vma(mm, vma, end, 0);
106986+ if (error)
106987+ goto fail;
106988+ }
106989+
106990+ if (pax_find_mirror_vma(vma)) {
106991+ error = __do_munmap(mm, start_m, end_m - start_m);
106992+ if (error)
106993+ goto fail;
106994+ } else {
106995+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
106996+ if (!vma_m) {
106997+ error = -ENOMEM;
106998+ goto fail;
106999+ }
107000+ vma->vm_flags = newflags;
107001+ error = pax_mirror_vma(vma_m, vma);
107002+ if (error) {
107003+ vma->vm_flags = oldflags;
107004+ goto fail;
107005+ }
107006+ }
107007+ }
107008+#endif
107009+
107010 /*
107011 * First try to merge with previous and/or next vma.
107012 */
107013@@ -315,7 +419,19 @@ success:
107014 * vm_flags and vm_page_prot are protected by the mmap_sem
107015 * held in write mode.
107016 */
107017+
107018+#ifdef CONFIG_PAX_SEGMEXEC
107019+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
107020+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
107021+#endif
107022+
107023 vma->vm_flags = newflags;
107024+
107025+#ifdef CONFIG_PAX_MPROTECT
107026+ if (mm->binfmt && mm->binfmt->handle_mprotect)
107027+ mm->binfmt->handle_mprotect(vma, newflags);
107028+#endif
107029+
107030 dirty_accountable = vma_wants_writenotify(vma);
107031 vma_set_page_prot(vma);
107032
107033@@ -351,6 +467,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107034 end = start + len;
107035 if (end <= start)
107036 return -ENOMEM;
107037+
107038+#ifdef CONFIG_PAX_SEGMEXEC
107039+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
107040+ if (end > SEGMEXEC_TASK_SIZE)
107041+ return -EINVAL;
107042+ } else
107043+#endif
107044+
107045+ if (end > TASK_SIZE)
107046+ return -EINVAL;
107047+
107048 if (!arch_validate_prot(prot))
107049 return -EINVAL;
107050
107051@@ -358,7 +485,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107052 /*
107053 * Does the application expect PROT_READ to imply PROT_EXEC:
107054 */
107055- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
107056+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
107057 prot |= PROT_EXEC;
107058
107059 vm_flags = calc_vm_prot_bits(prot);
107060@@ -390,6 +517,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107061 if (start > vma->vm_start)
107062 prev = vma;
107063
107064+#ifdef CONFIG_PAX_MPROTECT
107065+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
107066+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
107067+#endif
107068+
107069 for (nstart = start ; ; ) {
107070 unsigned long newflags;
107071
107072@@ -400,6 +532,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107073
107074 /* newflags >> 4 shift VM_MAY% in place of VM_% */
107075 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
107076+ if (prot & (PROT_WRITE | PROT_EXEC))
107077+ gr_log_rwxmprotect(vma);
107078+
107079+ error = -EACCES;
107080+ goto out;
107081+ }
107082+
107083+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
107084 error = -EACCES;
107085 goto out;
107086 }
107087@@ -414,6 +554,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107088 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
107089 if (error)
107090 goto out;
107091+
107092+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
107093+
107094 nstart = tmp;
107095
107096 if (nstart < prev->vm_end)
107097diff --git a/mm/mremap.c b/mm/mremap.c
107098index 2dc44b1..caa1819 100644
107099--- a/mm/mremap.c
107100+++ b/mm/mremap.c
107101@@ -142,6 +142,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
107102 continue;
107103 pte = ptep_get_and_clear(mm, old_addr, old_pte);
107104 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
107105+
107106+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
107107+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
107108+ pte = pte_exprotect(pte);
107109+#endif
107110+
107111 pte = move_soft_dirty_pte(pte);
107112 set_pte_at(mm, new_addr, new_pte, pte);
107113 }
107114@@ -350,6 +356,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
107115 if (is_vm_hugetlb_page(vma))
107116 goto Einval;
107117
107118+#ifdef CONFIG_PAX_SEGMEXEC
107119+ if (pax_find_mirror_vma(vma))
107120+ goto Einval;
107121+#endif
107122+
107123 /* We can't remap across vm area boundaries */
107124 if (old_len > vma->vm_end - addr)
107125 goto Efault;
107126@@ -405,20 +416,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
107127 unsigned long ret = -EINVAL;
107128 unsigned long charged = 0;
107129 unsigned long map_flags;
107130+ unsigned long pax_task_size = TASK_SIZE;
107131
107132 if (new_addr & ~PAGE_MASK)
107133 goto out;
107134
107135- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
107136+#ifdef CONFIG_PAX_SEGMEXEC
107137+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
107138+ pax_task_size = SEGMEXEC_TASK_SIZE;
107139+#endif
107140+
107141+ pax_task_size -= PAGE_SIZE;
107142+
107143+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
107144 goto out;
107145
107146 /* Check if the location we're moving into overlaps the
107147 * old location at all, and fail if it does.
107148 */
107149- if ((new_addr <= addr) && (new_addr+new_len) > addr)
107150- goto out;
107151-
107152- if ((addr <= new_addr) && (addr+old_len) > new_addr)
107153+ if (addr + old_len > new_addr && new_addr + new_len > addr)
107154 goto out;
107155
107156 ret = do_munmap(mm, new_addr, new_len);
107157@@ -487,6 +503,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
107158 unsigned long ret = -EINVAL;
107159 unsigned long charged = 0;
107160 bool locked = false;
107161+ unsigned long pax_task_size = TASK_SIZE;
107162
107163 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
107164 return ret;
107165@@ -508,6 +525,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
107166 if (!new_len)
107167 return ret;
107168
107169+#ifdef CONFIG_PAX_SEGMEXEC
107170+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
107171+ pax_task_size = SEGMEXEC_TASK_SIZE;
107172+#endif
107173+
107174+ pax_task_size -= PAGE_SIZE;
107175+
107176+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
107177+ old_len > pax_task_size || addr > pax_task_size-old_len)
107178+ return ret;
107179+
107180 down_write(&current->mm->mmap_sem);
107181
107182 if (flags & MREMAP_FIXED) {
107183@@ -558,6 +586,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
107184 new_addr = addr;
107185 }
107186 ret = addr;
107187+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
107188 goto out;
107189 }
107190 }
107191@@ -581,7 +610,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
107192 goto out;
107193 }
107194
107195+ map_flags = vma->vm_flags;
107196 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
107197+ if (!(ret & ~PAGE_MASK)) {
107198+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
107199+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
107200+ }
107201 }
107202 out:
107203 if (ret & ~PAGE_MASK)
107204diff --git a/mm/nommu.c b/mm/nommu.c
107205index 3fba2dc..fdad748 100644
107206--- a/mm/nommu.c
107207+++ b/mm/nommu.c
107208@@ -72,7 +72,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
107209 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
107210 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
107211 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
107212-int heap_stack_gap = 0;
107213
107214 atomic_long_t mmap_pages_allocated;
107215
107216@@ -892,15 +891,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
107217 EXPORT_SYMBOL(find_vma);
107218
107219 /*
107220- * find a VMA
107221- * - we don't extend stack VMAs under NOMMU conditions
107222- */
107223-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
107224-{
107225- return find_vma(mm, addr);
107226-}
107227-
107228-/*
107229 * expand a stack to a given address
107230 * - not supported under NOMMU conditions
107231 */
107232@@ -1585,6 +1575,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
107233
107234 /* most fields are the same, copy all, and then fixup */
107235 *new = *vma;
107236+ INIT_LIST_HEAD(&new->anon_vma_chain);
107237 *region = *vma->vm_region;
107238 new->vm_region = region;
107239
107240@@ -2007,8 +1998,8 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
107241 }
107242 EXPORT_SYMBOL(filemap_map_pages);
107243
107244-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
107245- unsigned long addr, void *buf, int len, int write)
107246+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
107247+ unsigned long addr, void *buf, size_t len, int write)
107248 {
107249 struct vm_area_struct *vma;
107250
107251@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
107252 *
107253 * The caller must hold a reference on @mm.
107254 */
107255-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
107256- void *buf, int len, int write)
107257+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
107258+ void *buf, size_t len, int write)
107259 {
107260 return __access_remote_vm(NULL, mm, addr, buf, len, write);
107261 }
107262@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
107263 * Access another process' address space.
107264 * - source/target buffer must be kernel space
107265 */
107266-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
107267+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
107268 {
107269 struct mm_struct *mm;
107270
107271diff --git a/mm/page-writeback.c b/mm/page-writeback.c
107272index ad05f2f..cee723a 100644
107273--- a/mm/page-writeback.c
107274+++ b/mm/page-writeback.c
107275@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
107276 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
107277 * - the bdi dirty thresh drops quickly due to change of JBOD workload
107278 */
107279-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
107280+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
107281 unsigned long thresh,
107282 unsigned long bg_thresh,
107283 unsigned long dirty,
107284diff --git a/mm/page_alloc.c b/mm/page_alloc.c
107285index 40e2942..0eb29a2 100644
107286--- a/mm/page_alloc.c
107287+++ b/mm/page_alloc.c
107288@@ -61,6 +61,7 @@
107289 #include <linux/hugetlb.h>
107290 #include <linux/sched/rt.h>
107291 #include <linux/page_owner.h>
107292+#include <linux/random.h>
107293
107294 #include <asm/sections.h>
107295 #include <asm/tlbflush.h>
107296@@ -357,7 +358,7 @@ out:
107297 * This usage means that zero-order pages may not be compound.
107298 */
107299
107300-static void free_compound_page(struct page *page)
107301+void free_compound_page(struct page *page)
107302 {
107303 __free_pages_ok(page, compound_order(page));
107304 }
107305@@ -480,7 +481,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
107306 __mod_zone_freepage_state(zone, (1 << order), migratetype);
107307 }
107308 #else
107309-struct page_ext_operations debug_guardpage_ops = { NULL, };
107310+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
107311 static inline void set_page_guard(struct zone *zone, struct page *page,
107312 unsigned int order, int migratetype) {}
107313 static inline void clear_page_guard(struct zone *zone, struct page *page,
107314@@ -783,6 +784,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
107315 bool compound = PageCompound(page);
107316 int i, bad = 0;
107317
107318+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107319+ unsigned long index = 1UL << order;
107320+#endif
107321+
107322 VM_BUG_ON_PAGE(PageTail(page), page);
107323 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
107324
107325@@ -809,6 +814,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
107326 debug_check_no_obj_freed(page_address(page),
107327 PAGE_SIZE << order);
107328 }
107329+
107330+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107331+ for (; index; --index)
107332+ sanitize_highpage(page + index - 1);
107333+#endif
107334+
107335 arch_free_page(page, order);
107336 kernel_map_pages(page, 1 << order, 0);
107337
107338@@ -832,6 +843,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
107339 local_irq_restore(flags);
107340 }
107341
107342+#ifdef CONFIG_PAX_LATENT_ENTROPY
107343+bool __meminitdata extra_latent_entropy;
107344+
107345+static int __init setup_pax_extra_latent_entropy(char *str)
107346+{
107347+ extra_latent_entropy = true;
107348+ return 0;
107349+}
107350+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
107351+
107352+volatile u64 latent_entropy __latent_entropy;
107353+EXPORT_SYMBOL(latent_entropy);
107354+#endif
107355+
107356 void __init __free_pages_bootmem(struct page *page, unsigned int order)
107357 {
107358 unsigned int nr_pages = 1 << order;
107359@@ -847,6 +872,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
107360 __ClearPageReserved(p);
107361 set_page_count(p, 0);
107362
107363+#ifdef CONFIG_PAX_LATENT_ENTROPY
107364+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
107365+ u64 hash = 0;
107366+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
107367+ const u64 *data = lowmem_page_address(page);
107368+
107369+ for (index = 0; index < end; index++)
107370+ hash ^= hash + data[index];
107371+ latent_entropy ^= hash;
107372+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
107373+ }
107374+#endif
107375+
107376 page_zone(page)->managed_pages += nr_pages;
107377 set_page_refcounted(page);
107378 __free_pages(page, order);
107379@@ -974,8 +1012,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
107380 kernel_map_pages(page, 1 << order, 1);
107381 kasan_alloc_pages(page, order);
107382
107383+#ifndef CONFIG_PAX_MEMORY_SANITIZE
107384 if (gfp_flags & __GFP_ZERO)
107385 prep_zero_page(page, order, gfp_flags);
107386+#endif
107387
107388 if (order && (gfp_flags & __GFP_COMP))
107389 prep_compound_page(page, order);
107390@@ -1699,7 +1739,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
107391 }
107392
107393 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
107394- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
107395+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
107396 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
107397 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
107398
107399@@ -2018,7 +2058,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
107400 do {
107401 mod_zone_page_state(zone, NR_ALLOC_BATCH,
107402 high_wmark_pages(zone) - low_wmark_pages(zone) -
107403- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
107404+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
107405 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
107406 } while (zone++ != preferred_zone);
107407 }
107408@@ -5738,7 +5778,7 @@ static void __setup_per_zone_wmarks(void)
107409
107410 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
107411 high_wmark_pages(zone) - low_wmark_pages(zone) -
107412- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
107413+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
107414
107415 setup_zone_migrate_reserve(zone);
107416 spin_unlock_irqrestore(&zone->lock, flags);
107417diff --git a/mm/percpu.c b/mm/percpu.c
107418index 73c97a5..508ee25 100644
107419--- a/mm/percpu.c
107420+++ b/mm/percpu.c
107421@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
107422 static unsigned int pcpu_high_unit_cpu __read_mostly;
107423
107424 /* the address of the first chunk which starts with the kernel static area */
107425-void *pcpu_base_addr __read_mostly;
107426+void *pcpu_base_addr __read_only;
107427 EXPORT_SYMBOL_GPL(pcpu_base_addr);
107428
107429 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
107430diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
107431index b159769..d07037f 100644
107432--- a/mm/process_vm_access.c
107433+++ b/mm/process_vm_access.c
107434@@ -13,6 +13,7 @@
107435 #include <linux/uio.h>
107436 #include <linux/sched.h>
107437 #include <linux/highmem.h>
107438+#include <linux/security.h>
107439 #include <linux/ptrace.h>
107440 #include <linux/slab.h>
107441 #include <linux/syscalls.h>
107442@@ -154,19 +155,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
107443 ssize_t iov_len;
107444 size_t total_len = iov_iter_count(iter);
107445
107446+ return -ENOSYS; // PaX: until properly audited
107447+
107448 /*
107449 * Work out how many pages of struct pages we're going to need
107450 * when eventually calling get_user_pages
107451 */
107452 for (i = 0; i < riovcnt; i++) {
107453 iov_len = rvec[i].iov_len;
107454- if (iov_len > 0) {
107455- nr_pages_iov = ((unsigned long)rvec[i].iov_base
107456- + iov_len)
107457- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
107458- / PAGE_SIZE + 1;
107459- nr_pages = max(nr_pages, nr_pages_iov);
107460- }
107461+ if (iov_len <= 0)
107462+ continue;
107463+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
107464+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
107465+ nr_pages = max(nr_pages, nr_pages_iov);
107466 }
107467
107468 if (nr_pages == 0)
107469@@ -194,6 +195,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
107470 goto free_proc_pages;
107471 }
107472
107473+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
107474+ rc = -EPERM;
107475+ goto put_task_struct;
107476+ }
107477+
107478 mm = mm_access(task, PTRACE_MODE_ATTACH);
107479 if (!mm || IS_ERR(mm)) {
107480 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
107481diff --git a/mm/rmap.c b/mm/rmap.c
107482index c161a14..8a069bb 100644
107483--- a/mm/rmap.c
107484+++ b/mm/rmap.c
107485@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107486 struct anon_vma *anon_vma = vma->anon_vma;
107487 struct anon_vma_chain *avc;
107488
107489+#ifdef CONFIG_PAX_SEGMEXEC
107490+ struct anon_vma_chain *avc_m = NULL;
107491+#endif
107492+
107493 might_sleep();
107494 if (unlikely(!anon_vma)) {
107495 struct mm_struct *mm = vma->vm_mm;
107496@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107497 if (!avc)
107498 goto out_enomem;
107499
107500+#ifdef CONFIG_PAX_SEGMEXEC
107501+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
107502+ if (!avc_m)
107503+ goto out_enomem_free_avc;
107504+#endif
107505+
107506 anon_vma = find_mergeable_anon_vma(vma);
107507 allocated = NULL;
107508 if (!anon_vma) {
107509@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107510 /* page_table_lock to protect against threads */
107511 spin_lock(&mm->page_table_lock);
107512 if (likely(!vma->anon_vma)) {
107513+
107514+#ifdef CONFIG_PAX_SEGMEXEC
107515+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
107516+
107517+ if (vma_m) {
107518+ BUG_ON(vma_m->anon_vma);
107519+ vma_m->anon_vma = anon_vma;
107520+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
107521+ anon_vma->degree++;
107522+ avc_m = NULL;
107523+ }
107524+#endif
107525+
107526 vma->anon_vma = anon_vma;
107527 anon_vma_chain_link(vma, avc, anon_vma);
107528 /* vma reference or self-parent link for new root */
107529@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107530
107531 if (unlikely(allocated))
107532 put_anon_vma(allocated);
107533+
107534+#ifdef CONFIG_PAX_SEGMEXEC
107535+ if (unlikely(avc_m))
107536+ anon_vma_chain_free(avc_m);
107537+#endif
107538+
107539 if (unlikely(avc))
107540 anon_vma_chain_free(avc);
107541 }
107542 return 0;
107543
107544 out_enomem_free_avc:
107545+
107546+#ifdef CONFIG_PAX_SEGMEXEC
107547+ if (avc_m)
107548+ anon_vma_chain_free(avc_m);
107549+#endif
107550+
107551 anon_vma_chain_free(avc);
107552 out_enomem:
107553 return -ENOMEM;
107554@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
107555 * good chance of avoiding scanning the whole hierarchy when it searches where
107556 * page is mapped.
107557 */
107558-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
107559+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
107560 {
107561 struct anon_vma_chain *avc, *pavc;
107562 struct anon_vma *root = NULL;
107563@@ -303,7 +338,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
107564 * the corresponding VMA in the parent process is attached to.
107565 * Returns 0 on success, non-zero on failure.
107566 */
107567-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
107568+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
107569 {
107570 struct anon_vma_chain *avc;
107571 struct anon_vma *anon_vma;
107572@@ -423,8 +458,10 @@ static void anon_vma_ctor(void *data)
107573 void __init anon_vma_init(void)
107574 {
107575 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
107576- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
107577- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
107578+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
107579+ anon_vma_ctor);
107580+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
107581+ SLAB_PANIC|SLAB_NO_SANITIZE);
107582 }
107583
107584 /*
107585diff --git a/mm/shmem.c b/mm/shmem.c
107586index cf2d0ca..ec06b8b 100644
107587--- a/mm/shmem.c
107588+++ b/mm/shmem.c
107589@@ -33,7 +33,7 @@
107590 #include <linux/swap.h>
107591 #include <linux/aio.h>
107592
107593-static struct vfsmount *shm_mnt;
107594+struct vfsmount *shm_mnt;
107595
107596 #ifdef CONFIG_SHMEM
107597 /*
107598@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
107599 #define BOGO_DIRENT_SIZE 20
107600
107601 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
107602-#define SHORT_SYMLINK_LEN 128
107603+#define SHORT_SYMLINK_LEN 64
107604
107605 /*
107606 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
107607@@ -2555,6 +2555,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
107608 static int shmem_xattr_validate(const char *name)
107609 {
107610 struct { const char *prefix; size_t len; } arr[] = {
107611+
107612+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
107613+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
107614+#endif
107615+
107616 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
107617 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
107618 };
107619@@ -2610,6 +2615,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
107620 if (err)
107621 return err;
107622
107623+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
107624+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
107625+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
107626+ return -EOPNOTSUPP;
107627+ if (size > 8)
107628+ return -EINVAL;
107629+ }
107630+#endif
107631+
107632 return simple_xattr_set(&info->xattrs, name, value, size, flags);
107633 }
107634
107635@@ -2993,8 +3007,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
107636 int err = -ENOMEM;
107637
107638 /* Round up to L1_CACHE_BYTES to resist false sharing */
107639- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
107640- L1_CACHE_BYTES), GFP_KERNEL);
107641+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
107642 if (!sbinfo)
107643 return -ENOMEM;
107644
107645diff --git a/mm/slab.c b/mm/slab.c
107646index c4b89ea..20990be 100644
107647--- a/mm/slab.c
107648+++ b/mm/slab.c
107649@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
107650 if ((x)->max_freeable < i) \
107651 (x)->max_freeable = i; \
107652 } while (0)
107653-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
107654-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
107655-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
107656-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
107657+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
107658+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
107659+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
107660+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
107661+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
107662+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
107663 #else
107664 #define STATS_INC_ACTIVE(x) do { } while (0)
107665 #define STATS_DEC_ACTIVE(x) do { } while (0)
107666@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
107667 #define STATS_INC_ALLOCMISS(x) do { } while (0)
107668 #define STATS_INC_FREEHIT(x) do { } while (0)
107669 #define STATS_INC_FREEMISS(x) do { } while (0)
107670+#define STATS_INC_SANITIZED(x) do { } while (0)
107671+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
107672 #endif
107673
107674 #if DEBUG
107675@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
107676 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
107677 */
107678 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
107679- const struct page *page, void *obj)
107680+ const struct page *page, const void *obj)
107681 {
107682 u32 offset = (obj - page->s_mem);
107683 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
107684@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
107685 * structures first. Without this, further allocations will bug.
107686 */
107687 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
107688- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
107689+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
107690 slab_state = PARTIAL_NODE;
107691
107692 slab_early_init = 0;
107693@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
107694
107695 cachep = find_mergeable(size, align, flags, name, ctor);
107696 if (cachep) {
107697- cachep->refcount++;
107698+ atomic_inc(&cachep->refcount);
107699
107700 /*
107701 * Adjust the object sizes so that we clear
107702@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
107703 struct array_cache *ac = cpu_cache_get(cachep);
107704
107705 check_irq_off();
107706+
107707+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107708+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
107709+ STATS_INC_NOT_SANITIZED(cachep);
107710+ else {
107711+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
107712+
107713+ if (cachep->ctor)
107714+ cachep->ctor(objp);
107715+
107716+ STATS_INC_SANITIZED(cachep);
107717+ }
107718+#endif
107719+
107720 kmemleak_free_recursive(objp, cachep->flags);
107721 objp = cache_free_debugcheck(cachep, objp, caller);
107722
107723@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
107724 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
107725 }
107726
107727-void *__kmalloc_node(size_t size, gfp_t flags, int node)
107728+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
107729 {
107730 return __do_kmalloc_node(size, flags, node, _RET_IP_);
107731 }
107732@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
107733 * @flags: the type of memory to allocate (see kmalloc).
107734 * @caller: function caller for debug tracking of the caller
107735 */
107736-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
107737+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
107738 unsigned long caller)
107739 {
107740 struct kmem_cache *cachep;
107741@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
107742
107743 if (unlikely(ZERO_OR_NULL_PTR(objp)))
107744 return;
107745+ VM_BUG_ON(!virt_addr_valid(objp));
107746 local_irq_save(flags);
107747 kfree_debugcheck(objp);
107748 c = virt_to_cache(objp);
107749@@ -3981,14 +4000,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
107750 }
107751 /* cpu stats */
107752 {
107753- unsigned long allochit = atomic_read(&cachep->allochit);
107754- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
107755- unsigned long freehit = atomic_read(&cachep->freehit);
107756- unsigned long freemiss = atomic_read(&cachep->freemiss);
107757+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
107758+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
107759+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
107760+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
107761
107762 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
107763 allochit, allocmiss, freehit, freemiss);
107764 }
107765+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107766+ {
107767+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
107768+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
107769+
107770+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
107771+ }
107772+#endif
107773 #endif
107774 }
107775
107776@@ -4196,13 +4223,69 @@ static const struct file_operations proc_slabstats_operations = {
107777 static int __init slab_proc_init(void)
107778 {
107779 #ifdef CONFIG_DEBUG_SLAB_LEAK
107780- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
107781+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
107782 #endif
107783 return 0;
107784 }
107785 module_init(slab_proc_init);
107786 #endif
107787
107788+bool is_usercopy_object(const void *ptr)
107789+{
107790+ struct page *page;
107791+ struct kmem_cache *cachep;
107792+
107793+ if (ZERO_OR_NULL_PTR(ptr))
107794+ return false;
107795+
107796+ if (!slab_is_available())
107797+ return false;
107798+
107799+ if (!virt_addr_valid(ptr))
107800+ return false;
107801+
107802+ page = virt_to_head_page(ptr);
107803+
107804+ if (!PageSlab(page))
107805+ return false;
107806+
107807+ cachep = page->slab_cache;
107808+ return cachep->flags & SLAB_USERCOPY;
107809+}
107810+
107811+#ifdef CONFIG_PAX_USERCOPY
107812+const char *check_heap_object(const void *ptr, unsigned long n)
107813+{
107814+ struct page *page;
107815+ struct kmem_cache *cachep;
107816+ unsigned int objnr;
107817+ unsigned long offset;
107818+
107819+ if (ZERO_OR_NULL_PTR(ptr))
107820+ return "<null>";
107821+
107822+ if (!virt_addr_valid(ptr))
107823+ return NULL;
107824+
107825+ page = virt_to_head_page(ptr);
107826+
107827+ if (!PageSlab(page))
107828+ return NULL;
107829+
107830+ cachep = page->slab_cache;
107831+ if (!(cachep->flags & SLAB_USERCOPY))
107832+ return cachep->name;
107833+
107834+ objnr = obj_to_index(cachep, page, ptr);
107835+ BUG_ON(objnr >= cachep->num);
107836+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
107837+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
107838+ return NULL;
107839+
107840+ return cachep->name;
107841+}
107842+#endif
107843+
107844 /**
107845 * ksize - get the actual amount of memory allocated for a given object
107846 * @objp: Pointer to the object
107847diff --git a/mm/slab.h b/mm/slab.h
107848index 4c3ac12..7b2e470 100644
107849--- a/mm/slab.h
107850+++ b/mm/slab.h
107851@@ -22,7 +22,7 @@ struct kmem_cache {
107852 unsigned int align; /* Alignment as calculated */
107853 unsigned long flags; /* Active flags on the slab */
107854 const char *name; /* Slab name for sysfs */
107855- int refcount; /* Use counter */
107856+ atomic_t refcount; /* Use counter */
107857 void (*ctor)(void *); /* Called on object slot creation */
107858 struct list_head list; /* List of all slab caches on the system */
107859 };
107860@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
107861 /* The slab cache that manages slab cache information */
107862 extern struct kmem_cache *kmem_cache;
107863
107864+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107865+#ifdef CONFIG_X86_64
107866+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
107867+#else
107868+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
107869+#endif
107870+enum pax_sanitize_mode {
107871+ PAX_SANITIZE_SLAB_OFF = 0,
107872+ PAX_SANITIZE_SLAB_FAST,
107873+ PAX_SANITIZE_SLAB_FULL,
107874+};
107875+extern enum pax_sanitize_mode pax_sanitize_slab;
107876+#endif
107877+
107878 unsigned long calculate_alignment(unsigned long flags,
107879 unsigned long align, unsigned long size);
107880
107881@@ -114,7 +128,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
107882
107883 /* Legal flag mask for kmem_cache_create(), for various configurations */
107884 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
107885- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
107886+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
107887+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
107888
107889 #if defined(CONFIG_DEBUG_SLAB)
107890 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
107891@@ -315,6 +330,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
107892 return s;
107893
107894 page = virt_to_head_page(x);
107895+
107896+ BUG_ON(!PageSlab(page));
107897+
107898 cachep = page->slab_cache;
107899 if (slab_equal_or_root(cachep, s))
107900 return cachep;
107901diff --git a/mm/slab_common.c b/mm/slab_common.c
107902index 999bb34..9843aea 100644
107903--- a/mm/slab_common.c
107904+++ b/mm/slab_common.c
107905@@ -25,11 +25,35 @@
107906
107907 #include "slab.h"
107908
107909-enum slab_state slab_state;
107910+enum slab_state slab_state __read_only;
107911 LIST_HEAD(slab_caches);
107912 DEFINE_MUTEX(slab_mutex);
107913 struct kmem_cache *kmem_cache;
107914
107915+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107916+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
107917+static int __init pax_sanitize_slab_setup(char *str)
107918+{
107919+ if (!str)
107920+ return 0;
107921+
107922+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
107923+ pr_info("PaX slab sanitization: %s\n", "disabled");
107924+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
107925+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
107926+ pr_info("PaX slab sanitization: %s\n", "fast");
107927+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
107928+ } else if (!strcmp(str, "full")) {
107929+ pr_info("PaX slab sanitization: %s\n", "full");
107930+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
107931+ } else
107932+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
107933+
107934+ return 0;
107935+}
107936+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
107937+#endif
107938+
107939 /*
107940 * Set of flags that will prevent slab merging
107941 */
107942@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
107943 * Merge control. If this is set then no merging of slab caches will occur.
107944 * (Could be removed. This was introduced to pacify the merge skeptics.)
107945 */
107946-static int slab_nomerge;
107947+static int slab_nomerge = 1;
107948
107949 static int __init setup_slab_nomerge(char *str)
107950 {
107951@@ -217,7 +241,7 @@ int slab_unmergeable(struct kmem_cache *s)
107952 /*
107953 * We may have set a slab to be unmergeable during bootstrap.
107954 */
107955- if (s->refcount < 0)
107956+ if (atomic_read(&s->refcount) < 0)
107957 return 1;
107958
107959 return 0;
107960@@ -321,7 +345,7 @@ do_kmem_cache_create(const char *name, size_t object_size, size_t size,
107961 if (err)
107962 goto out_free_cache;
107963
107964- s->refcount = 1;
107965+ atomic_set(&s->refcount, 1);
107966 list_add(&s->list, &slab_caches);
107967 out:
107968 if (err)
107969@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
107970 */
107971 flags &= CACHE_CREATE_MASK;
107972
107973+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107974+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
107975+ flags |= SLAB_NO_SANITIZE;
107976+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
107977+ flags &= ~SLAB_NO_SANITIZE;
107978+#endif
107979+
107980 s = __kmem_cache_alias(name, size, align, flags, ctor);
107981 if (s)
107982 goto out_unlock;
107983@@ -456,7 +487,7 @@ static void do_kmem_cache_release(struct list_head *release,
107984 rcu_barrier();
107985
107986 list_for_each_entry_safe(s, s2, release, list) {
107987-#ifdef SLAB_SUPPORTS_SYSFS
107988+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
107989 sysfs_slab_remove(s);
107990 #else
107991 slab_kmem_cache_release(s);
107992@@ -625,8 +656,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
107993
107994 mutex_lock(&slab_mutex);
107995
107996- s->refcount--;
107997- if (s->refcount)
107998+ if (!atomic_dec_and_test(&s->refcount))
107999 goto out_unlock;
108000
108001 for_each_memcg_cache_safe(c, c2, s) {
108002@@ -691,7 +721,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
108003 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
108004 name, size, err);
108005
108006- s->refcount = -1; /* Exempt from merging for now */
108007+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
108008 }
108009
108010 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
108011@@ -704,7 +734,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
108012
108013 create_boot_cache(s, name, size, flags);
108014 list_add(&s->list, &slab_caches);
108015- s->refcount = 1;
108016+ atomic_set(&s->refcount, 1);
108017 return s;
108018 }
108019
108020@@ -716,6 +746,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
108021 EXPORT_SYMBOL(kmalloc_dma_caches);
108022 #endif
108023
108024+#ifdef CONFIG_PAX_USERCOPY_SLABS
108025+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
108026+EXPORT_SYMBOL(kmalloc_usercopy_caches);
108027+#endif
108028+
108029 /*
108030 * Conversion table for small slabs sizes / 8 to the index in the
108031 * kmalloc array. This is necessary for slabs < 192 since we have non power
108032@@ -780,6 +815,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
108033 return kmalloc_dma_caches[index];
108034
108035 #endif
108036+
108037+#ifdef CONFIG_PAX_USERCOPY_SLABS
108038+ if (unlikely((flags & GFP_USERCOPY)))
108039+ return kmalloc_usercopy_caches[index];
108040+
108041+#endif
108042+
108043 return kmalloc_caches[index];
108044 }
108045
108046@@ -836,7 +878,7 @@ void __init create_kmalloc_caches(unsigned long flags)
108047 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
108048 if (!kmalloc_caches[i]) {
108049 kmalloc_caches[i] = create_kmalloc_cache(NULL,
108050- 1 << i, flags);
108051+ 1 << i, SLAB_USERCOPY | flags);
108052 }
108053
108054 /*
108055@@ -845,10 +887,10 @@ void __init create_kmalloc_caches(unsigned long flags)
108056 * earlier power of two caches
108057 */
108058 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
108059- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
108060+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
108061
108062 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
108063- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
108064+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
108065 }
108066
108067 /* Kmalloc array is now usable */
108068@@ -881,6 +923,23 @@ void __init create_kmalloc_caches(unsigned long flags)
108069 }
108070 }
108071 #endif
108072+
108073+#ifdef CONFIG_PAX_USERCOPY_SLABS
108074+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
108075+ struct kmem_cache *s = kmalloc_caches[i];
108076+
108077+ if (s) {
108078+ int size = kmalloc_size(i);
108079+ char *n = kasprintf(GFP_NOWAIT,
108080+ "usercopy-kmalloc-%d", size);
108081+
108082+ BUG_ON(!n);
108083+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
108084+ size, SLAB_USERCOPY | flags);
108085+ }
108086+ }
108087+#endif
108088+
108089 }
108090 #endif /* !CONFIG_SLOB */
108091
108092@@ -940,6 +999,9 @@ static void print_slabinfo_header(struct seq_file *m)
108093 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
108094 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
108095 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
108096+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108097+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
108098+#endif
108099 #endif
108100 seq_putc(m, '\n');
108101 }
108102@@ -1069,7 +1131,7 @@ static int __init slab_proc_init(void)
108103 module_init(slab_proc_init);
108104 #endif /* CONFIG_SLABINFO */
108105
108106-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
108107+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
108108 gfp_t flags)
108109 {
108110 void *ret;
108111diff --git a/mm/slob.c b/mm/slob.c
108112index 94a7fed..cf3fb1a 100644
108113--- a/mm/slob.c
108114+++ b/mm/slob.c
108115@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
108116 /*
108117 * Return the size of a slob block.
108118 */
108119-static slobidx_t slob_units(slob_t *s)
108120+static slobidx_t slob_units(const slob_t *s)
108121 {
108122 if (s->units > 0)
108123 return s->units;
108124@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
108125 /*
108126 * Return the next free slob block pointer after this one.
108127 */
108128-static slob_t *slob_next(slob_t *s)
108129+static slob_t *slob_next(const slob_t *s)
108130 {
108131 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
108132 slobidx_t next;
108133@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
108134 /*
108135 * Returns true if s is the last free block in its page.
108136 */
108137-static int slob_last(slob_t *s)
108138+static int slob_last(const slob_t *s)
108139 {
108140 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
108141 }
108142
108143-static void *slob_new_pages(gfp_t gfp, int order, int node)
108144+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
108145 {
108146- void *page;
108147+ struct page *page;
108148
108149 #ifdef CONFIG_NUMA
108150 if (node != NUMA_NO_NODE)
108151@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
108152 if (!page)
108153 return NULL;
108154
108155- return page_address(page);
108156+ __SetPageSlab(page);
108157+ return page;
108158 }
108159
108160-static void slob_free_pages(void *b, int order)
108161+static void slob_free_pages(struct page *sp, int order)
108162 {
108163 if (current->reclaim_state)
108164 current->reclaim_state->reclaimed_slab += 1 << order;
108165- free_pages((unsigned long)b, order);
108166+ __ClearPageSlab(sp);
108167+ page_mapcount_reset(sp);
108168+ sp->private = 0;
108169+ __free_pages(sp, order);
108170 }
108171
108172 /*
108173@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
108174
108175 /* Not enough space: must allocate a new page */
108176 if (!b) {
108177- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
108178- if (!b)
108179+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
108180+ if (!sp)
108181 return NULL;
108182- sp = virt_to_page(b);
108183- __SetPageSlab(sp);
108184+ b = page_address(sp);
108185
108186 spin_lock_irqsave(&slob_lock, flags);
108187 sp->units = SLOB_UNITS(PAGE_SIZE);
108188 sp->freelist = b;
108189+ sp->private = 0;
108190 INIT_LIST_HEAD(&sp->lru);
108191 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
108192 set_slob_page_free(sp, slob_list);
108193@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
108194 /*
108195 * slob_free: entry point into the slob allocator.
108196 */
108197-static void slob_free(void *block, int size)
108198+static void slob_free(struct kmem_cache *c, void *block, int size)
108199 {
108200 struct page *sp;
108201 slob_t *prev, *next, *b = (slob_t *)block;
108202@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
108203 if (slob_page_free(sp))
108204 clear_slob_page_free(sp);
108205 spin_unlock_irqrestore(&slob_lock, flags);
108206- __ClearPageSlab(sp);
108207- page_mapcount_reset(sp);
108208- slob_free_pages(b, 0);
108209+ slob_free_pages(sp, 0);
108210 return;
108211 }
108212
108213+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108214+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
108215+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
108216+#endif
108217+
108218 if (!slob_page_free(sp)) {
108219 /* This slob page is about to become partially free. Easy! */
108220 sp->units = units;
108221@@ -424,11 +431,10 @@ out:
108222 */
108223
108224 static __always_inline void *
108225-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
108226+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
108227 {
108228- unsigned int *m;
108229- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
108230- void *ret;
108231+ slob_t *m;
108232+ void *ret = NULL;
108233
108234 gfp &= gfp_allowed_mask;
108235
108236@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
108237
108238 if (!m)
108239 return NULL;
108240- *m = size;
108241+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
108242+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
108243+ m[0].units = size;
108244+ m[1].units = align;
108245 ret = (void *)m + align;
108246
108247 trace_kmalloc_node(caller, ret,
108248 size, size + align, gfp, node);
108249 } else {
108250 unsigned int order = get_order(size);
108251+ struct page *page;
108252
108253 if (likely(order))
108254 gfp |= __GFP_COMP;
108255- ret = slob_new_pages(gfp, order, node);
108256+ page = slob_new_pages(gfp, order, node);
108257+ if (page) {
108258+ ret = page_address(page);
108259+ page->private = size;
108260+ }
108261
108262 trace_kmalloc_node(caller, ret,
108263 size, PAGE_SIZE << order, gfp, node);
108264 }
108265
108266- kmemleak_alloc(ret, size, 1, gfp);
108267 return ret;
108268 }
108269
108270-void *__kmalloc(size_t size, gfp_t gfp)
108271+static __always_inline void *
108272+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
108273+{
108274+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
108275+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
108276+
108277+ if (!ZERO_OR_NULL_PTR(ret))
108278+ kmemleak_alloc(ret, size, 1, gfp);
108279+ return ret;
108280+}
108281+
108282+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
108283 {
108284 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
108285 }
108286@@ -491,34 +515,112 @@ void kfree(const void *block)
108287 return;
108288 kmemleak_free(block);
108289
108290+ VM_BUG_ON(!virt_addr_valid(block));
108291 sp = virt_to_page(block);
108292- if (PageSlab(sp)) {
108293+ VM_BUG_ON(!PageSlab(sp));
108294+ if (!sp->private) {
108295 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
108296- unsigned int *m = (unsigned int *)(block - align);
108297- slob_free(m, *m + align);
108298- } else
108299+ slob_t *m = (slob_t *)(block - align);
108300+ slob_free(NULL, m, m[0].units + align);
108301+ } else {
108302+ __ClearPageSlab(sp);
108303+ page_mapcount_reset(sp);
108304+ sp->private = 0;
108305 __free_pages(sp, compound_order(sp));
108306+ }
108307 }
108308 EXPORT_SYMBOL(kfree);
108309
108310+bool is_usercopy_object(const void *ptr)
108311+{
108312+ if (!slab_is_available())
108313+ return false;
108314+
108315+ // PAX: TODO
108316+
108317+ return false;
108318+}
108319+
108320+#ifdef CONFIG_PAX_USERCOPY
108321+const char *check_heap_object(const void *ptr, unsigned long n)
108322+{
108323+ struct page *page;
108324+ const slob_t *free;
108325+ const void *base;
108326+ unsigned long flags;
108327+
108328+ if (ZERO_OR_NULL_PTR(ptr))
108329+ return "<null>";
108330+
108331+ if (!virt_addr_valid(ptr))
108332+ return NULL;
108333+
108334+ page = virt_to_head_page(ptr);
108335+ if (!PageSlab(page))
108336+ return NULL;
108337+
108338+ if (page->private) {
108339+ base = page;
108340+ if (base <= ptr && n <= page->private - (ptr - base))
108341+ return NULL;
108342+ return "<slob>";
108343+ }
108344+
108345+ /* some tricky double walking to find the chunk */
108346+ spin_lock_irqsave(&slob_lock, flags);
108347+ base = (void *)((unsigned long)ptr & PAGE_MASK);
108348+ free = page->freelist;
108349+
108350+ while (!slob_last(free) && (void *)free <= ptr) {
108351+ base = free + slob_units(free);
108352+ free = slob_next(free);
108353+ }
108354+
108355+ while (base < (void *)free) {
108356+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
108357+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
108358+ int offset;
108359+
108360+ if (ptr < base + align)
108361+ break;
108362+
108363+ offset = ptr - base - align;
108364+ if (offset >= m) {
108365+ base += size;
108366+ continue;
108367+ }
108368+
108369+ if (n > m - offset)
108370+ break;
108371+
108372+ spin_unlock_irqrestore(&slob_lock, flags);
108373+ return NULL;
108374+ }
108375+
108376+ spin_unlock_irqrestore(&slob_lock, flags);
108377+ return "<slob>";
108378+}
108379+#endif
108380+
108381 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
108382 size_t ksize(const void *block)
108383 {
108384 struct page *sp;
108385 int align;
108386- unsigned int *m;
108387+ slob_t *m;
108388
108389 BUG_ON(!block);
108390 if (unlikely(block == ZERO_SIZE_PTR))
108391 return 0;
108392
108393 sp = virt_to_page(block);
108394- if (unlikely(!PageSlab(sp)))
108395- return PAGE_SIZE << compound_order(sp);
108396+ VM_BUG_ON(!PageSlab(sp));
108397+ if (sp->private)
108398+ return sp->private;
108399
108400 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
108401- m = (unsigned int *)(block - align);
108402- return SLOB_UNITS(*m) * SLOB_UNIT;
108403+ m = (slob_t *)(block - align);
108404+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
108405 }
108406 EXPORT_SYMBOL(ksize);
108407
108408@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
108409
108410 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
108411 {
108412- void *b;
108413+ void *b = NULL;
108414
108415 flags &= gfp_allowed_mask;
108416
108417 lockdep_trace_alloc(flags);
108418
108419+#ifdef CONFIG_PAX_USERCOPY_SLABS
108420+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
108421+#else
108422 if (c->size < PAGE_SIZE) {
108423 b = slob_alloc(c->size, flags, c->align, node);
108424 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
108425 SLOB_UNITS(c->size) * SLOB_UNIT,
108426 flags, node);
108427 } else {
108428- b = slob_new_pages(flags, get_order(c->size), node);
108429+ struct page *sp;
108430+
108431+ sp = slob_new_pages(flags, get_order(c->size), node);
108432+ if (sp) {
108433+ b = page_address(sp);
108434+ sp->private = c->size;
108435+ }
108436 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
108437 PAGE_SIZE << get_order(c->size),
108438 flags, node);
108439 }
108440+#endif
108441
108442 if (b && c->ctor)
108443 c->ctor(b);
108444@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
108445 EXPORT_SYMBOL(kmem_cache_alloc);
108446
108447 #ifdef CONFIG_NUMA
108448-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
108449+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
108450 {
108451 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
108452 }
108453@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
108454 EXPORT_SYMBOL(kmem_cache_alloc_node);
108455 #endif
108456
108457-static void __kmem_cache_free(void *b, int size)
108458+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
108459 {
108460- if (size < PAGE_SIZE)
108461- slob_free(b, size);
108462+ struct page *sp;
108463+
108464+ sp = virt_to_page(b);
108465+ BUG_ON(!PageSlab(sp));
108466+ if (!sp->private)
108467+ slob_free(c, b, size);
108468 else
108469- slob_free_pages(b, get_order(size));
108470+ slob_free_pages(sp, get_order(size));
108471 }
108472
108473 static void kmem_rcu_free(struct rcu_head *head)
108474@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
108475 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
108476 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
108477
108478- __kmem_cache_free(b, slob_rcu->size);
108479+ __kmem_cache_free(NULL, b, slob_rcu->size);
108480 }
108481
108482 void kmem_cache_free(struct kmem_cache *c, void *b)
108483 {
108484+ int size = c->size;
108485+
108486+#ifdef CONFIG_PAX_USERCOPY_SLABS
108487+ if (size + c->align < PAGE_SIZE) {
108488+ size += c->align;
108489+ b -= c->align;
108490+ }
108491+#endif
108492+
108493 kmemleak_free_recursive(b, c->flags);
108494 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
108495 struct slob_rcu *slob_rcu;
108496- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
108497- slob_rcu->size = c->size;
108498+ slob_rcu = b + (size - sizeof(struct slob_rcu));
108499+ slob_rcu->size = size;
108500 call_rcu(&slob_rcu->head, kmem_rcu_free);
108501 } else {
108502- __kmem_cache_free(b, c->size);
108503+ __kmem_cache_free(c, b, size);
108504 }
108505
108506+#ifdef CONFIG_PAX_USERCOPY_SLABS
108507+ trace_kfree(_RET_IP_, b);
108508+#else
108509 trace_kmem_cache_free(_RET_IP_, b);
108510+#endif
108511+
108512 }
108513 EXPORT_SYMBOL(kmem_cache_free);
108514
108515diff --git a/mm/slub.c b/mm/slub.c
108516index 82c4737..55c316a 100644
108517--- a/mm/slub.c
108518+++ b/mm/slub.c
108519@@ -198,7 +198,7 @@ struct track {
108520
108521 enum track_item { TRACK_ALLOC, TRACK_FREE };
108522
108523-#ifdef CONFIG_SYSFS
108524+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108525 static int sysfs_slab_add(struct kmem_cache *);
108526 static int sysfs_slab_alias(struct kmem_cache *, const char *);
108527 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
108528@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
108529 if (!t->addr)
108530 return;
108531
108532- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
108533+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
108534 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
108535 #ifdef CONFIG_STACKTRACE
108536 {
108537@@ -2709,6 +2709,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
108538
108539 slab_free_hook(s, x);
108540
108541+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108542+ if (!(s->flags & SLAB_NO_SANITIZE)) {
108543+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
108544+ if (s->ctor)
108545+ s->ctor(x);
108546+ }
108547+#endif
108548+
108549 redo:
108550 /*
108551 * Determine the currently cpus per cpu slab.
108552@@ -3050,6 +3058,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
108553 s->inuse = size;
108554
108555 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
108556+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108557+ (!(flags & SLAB_NO_SANITIZE)) ||
108558+#endif
108559 s->ctor)) {
108560 /*
108561 * Relocate free pointer after the object if it is not
108562@@ -3304,7 +3315,7 @@ static int __init setup_slub_min_objects(char *str)
108563
108564 __setup("slub_min_objects=", setup_slub_min_objects);
108565
108566-void *__kmalloc(size_t size, gfp_t flags)
108567+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
108568 {
108569 struct kmem_cache *s;
108570 void *ret;
108571@@ -3342,7 +3353,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
108572 return ptr;
108573 }
108574
108575-void *__kmalloc_node(size_t size, gfp_t flags, int node)
108576+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
108577 {
108578 struct kmem_cache *s;
108579 void *ret;
108580@@ -3390,6 +3401,59 @@ static size_t __ksize(const void *object)
108581 return slab_ksize(page->slab_cache);
108582 }
108583
108584+bool is_usercopy_object(const void *ptr)
108585+{
108586+ struct page *page;
108587+ struct kmem_cache *s;
108588+
108589+ if (ZERO_OR_NULL_PTR(ptr))
108590+ return false;
108591+
108592+ if (!slab_is_available())
108593+ return false;
108594+
108595+ if (!virt_addr_valid(ptr))
108596+ return false;
108597+
108598+ page = virt_to_head_page(ptr);
108599+
108600+ if (!PageSlab(page))
108601+ return false;
108602+
108603+ s = page->slab_cache;
108604+ return s->flags & SLAB_USERCOPY;
108605+}
108606+
108607+#ifdef CONFIG_PAX_USERCOPY
108608+const char *check_heap_object(const void *ptr, unsigned long n)
108609+{
108610+ struct page *page;
108611+ struct kmem_cache *s;
108612+ unsigned long offset;
108613+
108614+ if (ZERO_OR_NULL_PTR(ptr))
108615+ return "<null>";
108616+
108617+ if (!virt_addr_valid(ptr))
108618+ return NULL;
108619+
108620+ page = virt_to_head_page(ptr);
108621+
108622+ if (!PageSlab(page))
108623+ return NULL;
108624+
108625+ s = page->slab_cache;
108626+ if (!(s->flags & SLAB_USERCOPY))
108627+ return s->name;
108628+
108629+ offset = (ptr - page_address(page)) % s->size;
108630+ if (offset <= s->object_size && n <= s->object_size - offset)
108631+ return NULL;
108632+
108633+ return s->name;
108634+}
108635+#endif
108636+
108637 size_t ksize(const void *object)
108638 {
108639 size_t size = __ksize(object);
108640@@ -3410,6 +3474,7 @@ void kfree(const void *x)
108641 if (unlikely(ZERO_OR_NULL_PTR(x)))
108642 return;
108643
108644+ VM_BUG_ON(!virt_addr_valid(x));
108645 page = virt_to_head_page(x);
108646 if (unlikely(!PageSlab(page))) {
108647 BUG_ON(!PageCompound(page));
108648@@ -3726,7 +3791,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
108649
108650 s = find_mergeable(size, align, flags, name, ctor);
108651 if (s) {
108652- s->refcount++;
108653+ atomic_inc(&s->refcount);
108654
108655 /*
108656 * Adjust the object sizes so that we clear
108657@@ -3742,7 +3807,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
108658 }
108659
108660 if (sysfs_slab_alias(s, name)) {
108661- s->refcount--;
108662+ atomic_dec(&s->refcount);
108663 s = NULL;
108664 }
108665 }
108666@@ -3859,7 +3924,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
108667 }
108668 #endif
108669
108670-#ifdef CONFIG_SYSFS
108671+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108672 static int count_inuse(struct page *page)
108673 {
108674 return page->inuse;
108675@@ -4140,7 +4205,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
108676 len += sprintf(buf + len, "%7ld ", l->count);
108677
108678 if (l->addr)
108679+#ifdef CONFIG_GRKERNSEC_HIDESYM
108680+ len += sprintf(buf + len, "%pS", NULL);
108681+#else
108682 len += sprintf(buf + len, "%pS", (void *)l->addr);
108683+#endif
108684 else
108685 len += sprintf(buf + len, "<not-available>");
108686
108687@@ -4238,12 +4307,12 @@ static void __init resiliency_test(void)
108688 validate_slab_cache(kmalloc_caches[9]);
108689 }
108690 #else
108691-#ifdef CONFIG_SYSFS
108692+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108693 static void resiliency_test(void) {};
108694 #endif
108695 #endif
108696
108697-#ifdef CONFIG_SYSFS
108698+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108699 enum slab_stat_type {
108700 SL_ALL, /* All slabs */
108701 SL_PARTIAL, /* Only partially allocated slabs */
108702@@ -4480,13 +4549,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
108703 {
108704 if (!s->ctor)
108705 return 0;
108706+#ifdef CONFIG_GRKERNSEC_HIDESYM
108707+ return sprintf(buf, "%pS\n", NULL);
108708+#else
108709 return sprintf(buf, "%pS\n", s->ctor);
108710+#endif
108711 }
108712 SLAB_ATTR_RO(ctor);
108713
108714 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
108715 {
108716- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
108717+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
108718 }
108719 SLAB_ATTR_RO(aliases);
108720
108721@@ -4574,6 +4647,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
108722 SLAB_ATTR_RO(cache_dma);
108723 #endif
108724
108725+#ifdef CONFIG_PAX_USERCOPY_SLABS
108726+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
108727+{
108728+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
108729+}
108730+SLAB_ATTR_RO(usercopy);
108731+#endif
108732+
108733+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108734+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
108735+{
108736+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
108737+}
108738+SLAB_ATTR_RO(sanitize);
108739+#endif
108740+
108741 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
108742 {
108743 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
108744@@ -4629,7 +4718,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
108745 * as well as cause other issues like converting a mergeable
108746 * cache into an umergeable one.
108747 */
108748- if (s->refcount > 1)
108749+ if (atomic_read(&s->refcount) > 1)
108750 return -EINVAL;
108751
108752 s->flags &= ~SLAB_TRACE;
108753@@ -4749,7 +4838,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
108754 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
108755 size_t length)
108756 {
108757- if (s->refcount > 1)
108758+ if (atomic_read(&s->refcount) > 1)
108759 return -EINVAL;
108760
108761 s->flags &= ~SLAB_FAILSLAB;
108762@@ -4916,6 +5005,12 @@ static struct attribute *slab_attrs[] = {
108763 #ifdef CONFIG_ZONE_DMA
108764 &cache_dma_attr.attr,
108765 #endif
108766+#ifdef CONFIG_PAX_USERCOPY_SLABS
108767+ &usercopy_attr.attr,
108768+#endif
108769+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108770+ &sanitize_attr.attr,
108771+#endif
108772 #ifdef CONFIG_NUMA
108773 &remote_node_defrag_ratio_attr.attr,
108774 #endif
108775@@ -5157,6 +5252,7 @@ static char *create_unique_id(struct kmem_cache *s)
108776 return name;
108777 }
108778
108779+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108780 static int sysfs_slab_add(struct kmem_cache *s)
108781 {
108782 int err;
108783@@ -5230,6 +5326,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
108784 kobject_del(&s->kobj);
108785 kobject_put(&s->kobj);
108786 }
108787+#endif
108788
108789 /*
108790 * Need to buffer aliases during bootup until sysfs becomes
108791@@ -5243,6 +5340,7 @@ struct saved_alias {
108792
108793 static struct saved_alias *alias_list;
108794
108795+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108796 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
108797 {
108798 struct saved_alias *al;
108799@@ -5265,6 +5363,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
108800 alias_list = al;
108801 return 0;
108802 }
108803+#endif
108804
108805 static int __init slab_sysfs_init(void)
108806 {
108807diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
108808index 4cba9c2..b4f9fcc 100644
108809--- a/mm/sparse-vmemmap.c
108810+++ b/mm/sparse-vmemmap.c
108811@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
108812 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
108813 if (!p)
108814 return NULL;
108815- pud_populate(&init_mm, pud, p);
108816+ pud_populate_kernel(&init_mm, pud, p);
108817 }
108818 return pud;
108819 }
108820@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
108821 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
108822 if (!p)
108823 return NULL;
108824- pgd_populate(&init_mm, pgd, p);
108825+ pgd_populate_kernel(&init_mm, pgd, p);
108826 }
108827 return pgd;
108828 }
108829diff --git a/mm/sparse.c b/mm/sparse.c
108830index d1b48b6..6e8590e 100644
108831--- a/mm/sparse.c
108832+++ b/mm/sparse.c
108833@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
108834
108835 for (i = 0; i < PAGES_PER_SECTION; i++) {
108836 if (PageHWPoison(&memmap[i])) {
108837- atomic_long_sub(1, &num_poisoned_pages);
108838+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
108839 ClearPageHWPoison(&memmap[i]);
108840 }
108841 }
108842diff --git a/mm/swap.c b/mm/swap.c
108843index cd3a5e6..40c0c8f 100644
108844--- a/mm/swap.c
108845+++ b/mm/swap.c
108846@@ -31,6 +31,7 @@
108847 #include <linux/memcontrol.h>
108848 #include <linux/gfp.h>
108849 #include <linux/uio.h>
108850+#include <linux/hugetlb.h>
108851
108852 #include "internal.h"
108853
108854@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
108855
108856 __page_cache_release(page);
108857 dtor = get_compound_page_dtor(page);
108858+ if (!PageHuge(page))
108859+ BUG_ON(dtor != free_compound_page);
108860 (*dtor)(page);
108861 }
108862
108863diff --git a/mm/swapfile.c b/mm/swapfile.c
108864index 63f55cc..31874e6 100644
108865--- a/mm/swapfile.c
108866+++ b/mm/swapfile.c
108867@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
108868
108869 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
108870 /* Activity counter to indicate that a swapon or swapoff has occurred */
108871-static atomic_t proc_poll_event = ATOMIC_INIT(0);
108872+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
108873
108874 static inline unsigned char swap_count(unsigned char ent)
108875 {
108876@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
108877 spin_unlock(&swap_lock);
108878
108879 err = 0;
108880- atomic_inc(&proc_poll_event);
108881+ atomic_inc_unchecked(&proc_poll_event);
108882 wake_up_interruptible(&proc_poll_wait);
108883
108884 out_dput:
108885@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
108886
108887 poll_wait(file, &proc_poll_wait, wait);
108888
108889- if (seq->poll_event != atomic_read(&proc_poll_event)) {
108890- seq->poll_event = atomic_read(&proc_poll_event);
108891+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
108892+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
108893 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
108894 }
108895
108896@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
108897 return ret;
108898
108899 seq = file->private_data;
108900- seq->poll_event = atomic_read(&proc_poll_event);
108901+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
108902 return 0;
108903 }
108904
108905@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
108906 (frontswap_map) ? "FS" : "");
108907
108908 mutex_unlock(&swapon_mutex);
108909- atomic_inc(&proc_poll_event);
108910+ atomic_inc_unchecked(&proc_poll_event);
108911 wake_up_interruptible(&proc_poll_wait);
108912
108913 if (S_ISREG(inode->i_mode))
108914diff --git a/mm/util.c b/mm/util.c
108915index 3981ae9..28b585b 100644
108916--- a/mm/util.c
108917+++ b/mm/util.c
108918@@ -233,6 +233,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
108919 void arch_pick_mmap_layout(struct mm_struct *mm)
108920 {
108921 mm->mmap_base = TASK_UNMAPPED_BASE;
108922+
108923+#ifdef CONFIG_PAX_RANDMMAP
108924+ if (mm->pax_flags & MF_PAX_RANDMMAP)
108925+ mm->mmap_base += mm->delta_mmap;
108926+#endif
108927+
108928 mm->get_unmapped_area = arch_get_unmapped_area;
108929 }
108930 #endif
108931@@ -403,6 +409,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
108932 if (!mm->arg_end)
108933 goto out_mm; /* Shh! No looking before we're done */
108934
108935+ if (gr_acl_handle_procpidmem(task))
108936+ goto out_mm;
108937+
108938 len = mm->arg_end - mm->arg_start;
108939
108940 if (len > buflen)
108941diff --git a/mm/vmalloc.c b/mm/vmalloc.c
108942index 49abccf..7bd1931 100644
108943--- a/mm/vmalloc.c
108944+++ b/mm/vmalloc.c
108945@@ -39,20 +39,65 @@ struct vfree_deferred {
108946 struct work_struct wq;
108947 };
108948 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
108949+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
108950+
108951+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
108952+struct stack_deferred_llist {
108953+ struct llist_head list;
108954+ void *stack;
108955+ void *lowmem_stack;
108956+};
108957+
108958+struct stack_deferred {
108959+ struct stack_deferred_llist list;
108960+ struct work_struct wq;
108961+};
108962+
108963+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
108964+#endif
108965
108966 static void __vunmap(const void *, int);
108967
108968-static void free_work(struct work_struct *w)
108969+static void vfree_work(struct work_struct *w)
108970 {
108971 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
108972 struct llist_node *llnode = llist_del_all(&p->list);
108973 while (llnode) {
108974- void *p = llnode;
108975+ void *x = llnode;
108976 llnode = llist_next(llnode);
108977- __vunmap(p, 1);
108978+ __vunmap(x, 1);
108979 }
108980 }
108981
108982+static void vunmap_work(struct work_struct *w)
108983+{
108984+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
108985+ struct llist_node *llnode = llist_del_all(&p->list);
108986+ while (llnode) {
108987+ void *x = llnode;
108988+ llnode = llist_next(llnode);
108989+ __vunmap(x, 0);
108990+ }
108991+}
108992+
108993+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
108994+static void unmap_work(struct work_struct *w)
108995+{
108996+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
108997+ struct llist_node *llnode = llist_del_all(&p->list.list);
108998+ while (llnode) {
108999+ struct stack_deferred_llist *x =
109000+ llist_entry((struct llist_head *)llnode,
109001+ struct stack_deferred_llist, list);
109002+ void *stack = ACCESS_ONCE(x->stack);
109003+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
109004+ llnode = llist_next(llnode);
109005+ __vunmap(stack, 0);
109006+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
109007+ }
109008+}
109009+#endif
109010+
109011 /*** Page table manipulation functions ***/
109012
109013 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
109014@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
109015
109016 pte = pte_offset_kernel(pmd, addr);
109017 do {
109018- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
109019- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
109020+
109021+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
109022+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
109023+ BUG_ON(!pte_exec(*pte));
109024+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
109025+ continue;
109026+ }
109027+#endif
109028+
109029+ {
109030+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
109031+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
109032+ }
109033 } while (pte++, addr += PAGE_SIZE, addr != end);
109034 }
109035
109036@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
109037 pte = pte_alloc_kernel(pmd, addr);
109038 if (!pte)
109039 return -ENOMEM;
109040+
109041+ pax_open_kernel();
109042 do {
109043 struct page *page = pages[*nr];
109044
109045- if (WARN_ON(!pte_none(*pte)))
109046+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
109047+ if (pgprot_val(prot) & _PAGE_NX)
109048+#endif
109049+
109050+ if (!pte_none(*pte)) {
109051+ pax_close_kernel();
109052+ WARN_ON(1);
109053 return -EBUSY;
109054- if (WARN_ON(!page))
109055+ }
109056+ if (!page) {
109057+ pax_close_kernel();
109058+ WARN_ON(1);
109059 return -ENOMEM;
109060+ }
109061 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
109062 (*nr)++;
109063 } while (pte++, addr += PAGE_SIZE, addr != end);
109064+ pax_close_kernel();
109065 return 0;
109066 }
109067
109068@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
109069 pmd_t *pmd;
109070 unsigned long next;
109071
109072- pmd = pmd_alloc(&init_mm, pud, addr);
109073+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
109074 if (!pmd)
109075 return -ENOMEM;
109076 do {
109077@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
109078 pud_t *pud;
109079 unsigned long next;
109080
109081- pud = pud_alloc(&init_mm, pgd, addr);
109082+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
109083 if (!pud)
109084 return -ENOMEM;
109085 do {
109086@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
109087 if (addr >= MODULES_VADDR && addr < MODULES_END)
109088 return 1;
109089 #endif
109090+
109091+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
109092+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
109093+ return 1;
109094+#endif
109095+
109096 return is_vmalloc_addr(x);
109097 }
109098
109099@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
109100
109101 if (!pgd_none(*pgd)) {
109102 pud_t *pud = pud_offset(pgd, addr);
109103+#ifdef CONFIG_X86
109104+ if (!pud_large(*pud))
109105+#endif
109106 if (!pud_none(*pud)) {
109107 pmd_t *pmd = pmd_offset(pud, addr);
109108+#ifdef CONFIG_X86
109109+ if (!pmd_large(*pmd))
109110+#endif
109111 if (!pmd_none(*pmd)) {
109112 pte_t *ptep, pte;
109113
109114@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
109115 * Allocate a region of KVA of the specified size and alignment, within the
109116 * vstart and vend.
109117 */
109118-static struct vmap_area *alloc_vmap_area(unsigned long size,
109119+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
109120 unsigned long align,
109121 unsigned long vstart, unsigned long vend,
109122 int node, gfp_t gfp_mask)
109123@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
109124 for_each_possible_cpu(i) {
109125 struct vmap_block_queue *vbq;
109126 struct vfree_deferred *p;
109127+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
109128+ struct stack_deferred *p2;
109129+#endif
109130
109131 vbq = &per_cpu(vmap_block_queue, i);
109132 spin_lock_init(&vbq->lock);
109133 INIT_LIST_HEAD(&vbq->free);
109134+
109135 p = &per_cpu(vfree_deferred, i);
109136 init_llist_head(&p->list);
109137- INIT_WORK(&p->wq, free_work);
109138+ INIT_WORK(&p->wq, vfree_work);
109139+
109140+ p = &per_cpu(vunmap_deferred, i);
109141+ init_llist_head(&p->list);
109142+ INIT_WORK(&p->wq, vunmap_work);
109143+
109144+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
109145+ p2 = &per_cpu(stack_deferred, i);
109146+ init_llist_head(&p2->list.list);
109147+ INIT_WORK(&p2->wq, unmap_work);
109148+#endif
109149 }
109150
109151 /* Import existing vmlist entries. */
109152@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
109153 struct vm_struct *area;
109154
109155 BUG_ON(in_interrupt());
109156+
109157+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
109158+ if (flags & VM_KERNEXEC) {
109159+ if (start != VMALLOC_START || end != VMALLOC_END)
109160+ return NULL;
109161+ start = (unsigned long)MODULES_EXEC_VADDR;
109162+ end = (unsigned long)MODULES_EXEC_END;
109163+ }
109164+#endif
109165+
109166 if (flags & VM_IOREMAP)
109167 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
109168
109169@@ -1510,13 +1615,36 @@ EXPORT_SYMBOL(vfree);
109170 */
109171 void vunmap(const void *addr)
109172 {
109173- BUG_ON(in_interrupt());
109174- might_sleep();
109175- if (addr)
109176+ if (!addr)
109177+ return;
109178+ if (unlikely(in_interrupt())) {
109179+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
109180+ if (llist_add((struct llist_node *)addr, &p->list))
109181+ schedule_work(&p->wq);
109182+ } else {
109183+ might_sleep();
109184 __vunmap(addr, 0);
109185+ }
109186 }
109187 EXPORT_SYMBOL(vunmap);
109188
109189+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
109190+void unmap_process_stacks(struct task_struct *task)
109191+{
109192+ if (unlikely(in_interrupt())) {
109193+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
109194+ struct stack_deferred_llist *list = task->stack;
109195+ list->stack = task->stack;
109196+ list->lowmem_stack = task->lowmem_stack;
109197+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
109198+ schedule_work(&p->wq);
109199+ } else {
109200+ __vunmap(task->stack, 0);
109201+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
109202+ }
109203+}
109204+#endif
109205+
109206 /**
109207 * vmap - map an array of pages into virtually contiguous space
109208 * @pages: array of page pointers
109209@@ -1537,6 +1665,11 @@ void *vmap(struct page **pages, unsigned int count,
109210 if (count > totalram_pages)
109211 return NULL;
109212
109213+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
109214+ if (!(pgprot_val(prot) & _PAGE_NX))
109215+ flags |= VM_KERNEXEC;
109216+#endif
109217+
109218 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
109219 __builtin_return_address(0));
109220 if (!area)
109221@@ -1641,6 +1774,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
109222 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
109223 goto fail;
109224
109225+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
109226+ if (!(pgprot_val(prot) & _PAGE_NX)) {
109227+ vm_flags |= VM_KERNEXEC;
109228+ start = VMALLOC_START;
109229+ end = VMALLOC_END;
109230+ }
109231+#endif
109232+
109233 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
109234 vm_flags, start, end, node, gfp_mask, caller);
109235 if (!area)
109236@@ -1817,10 +1958,9 @@ EXPORT_SYMBOL(vzalloc_node);
109237 * For tight control over page level allocator and protection flags
109238 * use __vmalloc() instead.
109239 */
109240-
109241 void *vmalloc_exec(unsigned long size)
109242 {
109243- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
109244+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
109245 NUMA_NO_NODE, __builtin_return_address(0));
109246 }
109247
109248@@ -2127,6 +2267,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
109249 {
109250 struct vm_struct *area;
109251
109252+ BUG_ON(vma->vm_mirror);
109253+
109254 size = PAGE_ALIGN(size);
109255
109256 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
109257@@ -2609,7 +2751,11 @@ static int s_show(struct seq_file *m, void *p)
109258 v->addr, v->addr + v->size, v->size);
109259
109260 if (v->caller)
109261+#ifdef CONFIG_GRKERNSEC_HIDESYM
109262+ seq_printf(m, " %pK", v->caller);
109263+#else
109264 seq_printf(m, " %pS", v->caller);
109265+#endif
109266
109267 if (v->nr_pages)
109268 seq_printf(m, " pages=%d", v->nr_pages);
109269diff --git a/mm/vmstat.c b/mm/vmstat.c
109270index 4f5cd97..9fb715a 100644
109271--- a/mm/vmstat.c
109272+++ b/mm/vmstat.c
109273@@ -27,6 +27,7 @@
109274 #include <linux/mm_inline.h>
109275 #include <linux/page_ext.h>
109276 #include <linux/page_owner.h>
109277+#include <linux/grsecurity.h>
109278
109279 #include "internal.h"
109280
109281@@ -86,7 +87,7 @@ void vm_events_fold_cpu(int cpu)
109282 *
109283 * vm_stat contains the global counters
109284 */
109285-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
109286+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
109287 EXPORT_SYMBOL(vm_stat);
109288
109289 #ifdef CONFIG_SMP
109290@@ -438,7 +439,7 @@ static int fold_diff(int *diff)
109291
109292 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
109293 if (diff[i]) {
109294- atomic_long_add(diff[i], &vm_stat[i]);
109295+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
109296 changes++;
109297 }
109298 return changes;
109299@@ -476,7 +477,7 @@ static int refresh_cpu_vm_stats(void)
109300 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
109301 if (v) {
109302
109303- atomic_long_add(v, &zone->vm_stat[i]);
109304+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
109305 global_diff[i] += v;
109306 #ifdef CONFIG_NUMA
109307 /* 3 seconds idle till flush */
109308@@ -540,7 +541,7 @@ void cpu_vm_stats_fold(int cpu)
109309
109310 v = p->vm_stat_diff[i];
109311 p->vm_stat_diff[i] = 0;
109312- atomic_long_add(v, &zone->vm_stat[i]);
109313+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
109314 global_diff[i] += v;
109315 }
109316 }
109317@@ -560,8 +561,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
109318 if (pset->vm_stat_diff[i]) {
109319 int v = pset->vm_stat_diff[i];
109320 pset->vm_stat_diff[i] = 0;
109321- atomic_long_add(v, &zone->vm_stat[i]);
109322- atomic_long_add(v, &vm_stat[i]);
109323+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
109324+ atomic_long_add_unchecked(v, &vm_stat[i]);
109325 }
109326 }
109327 #endif
109328@@ -1293,10 +1294,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
109329 stat_items_size += sizeof(struct vm_event_state);
109330 #endif
109331
109332- v = kmalloc(stat_items_size, GFP_KERNEL);
109333+ v = kzalloc(stat_items_size, GFP_KERNEL);
109334 m->private = v;
109335 if (!v)
109336 return ERR_PTR(-ENOMEM);
109337+
109338+#ifdef CONFIG_GRKERNSEC_PROC_ADD
109339+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
109340+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
109341+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
109342+ && !in_group_p(grsec_proc_gid)
109343+#endif
109344+ )
109345+ return (unsigned long *)m->private + *pos;
109346+#endif
109347+#endif
109348+
109349 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
109350 v[i] = global_page_state(i);
109351 v += NR_VM_ZONE_STAT_ITEMS;
109352@@ -1528,10 +1541,16 @@ static int __init setup_vmstat(void)
109353 cpu_notifier_register_done();
109354 #endif
109355 #ifdef CONFIG_PROC_FS
109356- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
109357- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
109358- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
109359- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
109360+ {
109361+ mode_t gr_mode = S_IRUGO;
109362+#ifdef CONFIG_GRKERNSEC_PROC_ADD
109363+ gr_mode = S_IRUSR;
109364+#endif
109365+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
109366+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
109367+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
109368+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
109369+ }
109370 #endif
109371 return 0;
109372 }
109373diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
109374index 64c6bed..b79a5de 100644
109375--- a/net/8021q/vlan.c
109376+++ b/net/8021q/vlan.c
109377@@ -481,7 +481,7 @@ out:
109378 return NOTIFY_DONE;
109379 }
109380
109381-static struct notifier_block vlan_notifier_block __read_mostly = {
109382+static struct notifier_block vlan_notifier_block = {
109383 .notifier_call = vlan_device_event,
109384 };
109385
109386@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
109387 err = -EPERM;
109388 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
109389 break;
109390- if ((args.u.name_type >= 0) &&
109391- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
109392+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
109393 struct vlan_net *vn;
109394
109395 vn = net_generic(net, vlan_net_id);
109396diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
109397index c92b52f..006c052 100644
109398--- a/net/8021q/vlan_netlink.c
109399+++ b/net/8021q/vlan_netlink.c
109400@@ -245,7 +245,7 @@ static struct net *vlan_get_link_net(const struct net_device *dev)
109401 return dev_net(real_dev);
109402 }
109403
109404-struct rtnl_link_ops vlan_link_ops __read_mostly = {
109405+struct rtnl_link_ops vlan_link_ops = {
109406 .kind = "vlan",
109407 .maxtype = IFLA_VLAN_MAX,
109408 .policy = vlan_policy,
109409diff --git a/net/9p/client.c b/net/9p/client.c
109410index e86a9bea..e91f70e 100644
109411--- a/net/9p/client.c
109412+++ b/net/9p/client.c
109413@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
109414 len - inline_len);
109415 } else {
109416 err = copy_from_user(ename + inline_len,
109417- uidata, len - inline_len);
109418+ (char __force_user *)uidata, len - inline_len);
109419 if (err) {
109420 err = -EFAULT;
109421 goto out_err;
109422@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
109423 kernel_buf = 1;
109424 indata = data;
109425 } else
109426- indata = (__force char *)udata;
109427+ indata = (__force_kernel char *)udata;
109428 /*
109429 * response header len is 11
109430 * PDU Header(7) + IO Size (4)
109431@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
109432 kernel_buf = 1;
109433 odata = data;
109434 } else
109435- odata = (char *)udata;
109436+ odata = (char __force_kernel *)udata;
109437 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
109438 P9_ZC_HDR_SZ, kernel_buf, "dqd",
109439 fid->fid, offset, rsize);
109440diff --git a/net/9p/mod.c b/net/9p/mod.c
109441index 6ab36ae..6f1841b 100644
109442--- a/net/9p/mod.c
109443+++ b/net/9p/mod.c
109444@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
109445 void v9fs_register_trans(struct p9_trans_module *m)
109446 {
109447 spin_lock(&v9fs_trans_lock);
109448- list_add_tail(&m->list, &v9fs_trans_list);
109449+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
109450 spin_unlock(&v9fs_trans_lock);
109451 }
109452 EXPORT_SYMBOL(v9fs_register_trans);
109453@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
109454 void v9fs_unregister_trans(struct p9_trans_module *m)
109455 {
109456 spin_lock(&v9fs_trans_lock);
109457- list_del_init(&m->list);
109458+ pax_list_del_init((struct list_head *)&m->list);
109459 spin_unlock(&v9fs_trans_lock);
109460 }
109461 EXPORT_SYMBOL(v9fs_unregister_trans);
109462diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
109463index 80d08f6..de63fd1 100644
109464--- a/net/9p/trans_fd.c
109465+++ b/net/9p/trans_fd.c
109466@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
109467 oldfs = get_fs();
109468 set_fs(get_ds());
109469 /* The cast to a user pointer is valid due to the set_fs() */
109470- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
109471+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
109472 set_fs(oldfs);
109473
109474 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
109475diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
109476index af46bc4..f9adfcd 100644
109477--- a/net/appletalk/atalk_proc.c
109478+++ b/net/appletalk/atalk_proc.c
109479@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
109480 struct proc_dir_entry *p;
109481 int rc = -ENOMEM;
109482
109483- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
109484+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
109485 if (!atalk_proc_dir)
109486 goto out;
109487
109488diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
109489index 876fbe8..8bbea9f 100644
109490--- a/net/atm/atm_misc.c
109491+++ b/net/atm/atm_misc.c
109492@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
109493 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
109494 return 1;
109495 atm_return(vcc, truesize);
109496- atomic_inc(&vcc->stats->rx_drop);
109497+ atomic_inc_unchecked(&vcc->stats->rx_drop);
109498 return 0;
109499 }
109500 EXPORT_SYMBOL(atm_charge);
109501@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
109502 }
109503 }
109504 atm_return(vcc, guess);
109505- atomic_inc(&vcc->stats->rx_drop);
109506+ atomic_inc_unchecked(&vcc->stats->rx_drop);
109507 return NULL;
109508 }
109509 EXPORT_SYMBOL(atm_alloc_charge);
109510@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
109511
109512 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
109513 {
109514-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
109515+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
109516 __SONET_ITEMS
109517 #undef __HANDLE_ITEM
109518 }
109519@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
109520
109521 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
109522 {
109523-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
109524+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
109525 __SONET_ITEMS
109526 #undef __HANDLE_ITEM
109527 }
109528diff --git a/net/atm/lec.c b/net/atm/lec.c
109529index 4b98f89..5a2f6cb 100644
109530--- a/net/atm/lec.c
109531+++ b/net/atm/lec.c
109532@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
109533 }
109534
109535 static struct lane2_ops lane2_ops = {
109536- lane2_resolve, /* resolve, spec 3.1.3 */
109537- lane2_associate_req, /* associate_req, spec 3.1.4 */
109538- NULL /* associate indicator, spec 3.1.5 */
109539+ .resolve = lane2_resolve,
109540+ .associate_req = lane2_associate_req,
109541+ .associate_indicator = NULL
109542 };
109543
109544 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
109545diff --git a/net/atm/lec.h b/net/atm/lec.h
109546index 4149db1..f2ab682 100644
109547--- a/net/atm/lec.h
109548+++ b/net/atm/lec.h
109549@@ -48,7 +48,7 @@ struct lane2_ops {
109550 const u8 *tlvs, u32 sizeoftlvs);
109551 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
109552 const u8 *tlvs, u32 sizeoftlvs);
109553-};
109554+} __no_const;
109555
109556 /*
109557 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
109558diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
109559index d1b2d9a..d549f7f 100644
109560--- a/net/atm/mpoa_caches.c
109561+++ b/net/atm/mpoa_caches.c
109562@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
109563
109564
109565 static struct in_cache_ops ingress_ops = {
109566- in_cache_add_entry, /* add_entry */
109567- in_cache_get, /* get */
109568- in_cache_get_with_mask, /* get_with_mask */
109569- in_cache_get_by_vcc, /* get_by_vcc */
109570- in_cache_put, /* put */
109571- in_cache_remove_entry, /* remove_entry */
109572- cache_hit, /* cache_hit */
109573- clear_count_and_expired, /* clear_count */
109574- check_resolving_entries, /* check_resolving */
109575- refresh_entries, /* refresh */
109576- in_destroy_cache /* destroy_cache */
109577+ .add_entry = in_cache_add_entry,
109578+ .get = in_cache_get,
109579+ .get_with_mask = in_cache_get_with_mask,
109580+ .get_by_vcc = in_cache_get_by_vcc,
109581+ .put = in_cache_put,
109582+ .remove_entry = in_cache_remove_entry,
109583+ .cache_hit = cache_hit,
109584+ .clear_count = clear_count_and_expired,
109585+ .check_resolving = check_resolving_entries,
109586+ .refresh = refresh_entries,
109587+ .destroy_cache = in_destroy_cache
109588 };
109589
109590 static struct eg_cache_ops egress_ops = {
109591- eg_cache_add_entry, /* add_entry */
109592- eg_cache_get_by_cache_id, /* get_by_cache_id */
109593- eg_cache_get_by_tag, /* get_by_tag */
109594- eg_cache_get_by_vcc, /* get_by_vcc */
109595- eg_cache_get_by_src_ip, /* get_by_src_ip */
109596- eg_cache_put, /* put */
109597- eg_cache_remove_entry, /* remove_entry */
109598- update_eg_cache_entry, /* update */
109599- clear_expired, /* clear_expired */
109600- eg_destroy_cache /* destroy_cache */
109601+ .add_entry = eg_cache_add_entry,
109602+ .get_by_cache_id = eg_cache_get_by_cache_id,
109603+ .get_by_tag = eg_cache_get_by_tag,
109604+ .get_by_vcc = eg_cache_get_by_vcc,
109605+ .get_by_src_ip = eg_cache_get_by_src_ip,
109606+ .put = eg_cache_put,
109607+ .remove_entry = eg_cache_remove_entry,
109608+ .update = update_eg_cache_entry,
109609+ .clear_expired = clear_expired,
109610+ .destroy_cache = eg_destroy_cache
109611 };
109612
109613
109614diff --git a/net/atm/proc.c b/net/atm/proc.c
109615index bbb6461..cf04016 100644
109616--- a/net/atm/proc.c
109617+++ b/net/atm/proc.c
109618@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
109619 const struct k_atm_aal_stats *stats)
109620 {
109621 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
109622- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
109623- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
109624- atomic_read(&stats->rx_drop));
109625+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
109626+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
109627+ atomic_read_unchecked(&stats->rx_drop));
109628 }
109629
109630 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
109631diff --git a/net/atm/resources.c b/net/atm/resources.c
109632index 0447d5d..3cf4728 100644
109633--- a/net/atm/resources.c
109634+++ b/net/atm/resources.c
109635@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
109636 static void copy_aal_stats(struct k_atm_aal_stats *from,
109637 struct atm_aal_stats *to)
109638 {
109639-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
109640+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
109641 __AAL_STAT_ITEMS
109642 #undef __HANDLE_ITEM
109643 }
109644@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
109645 static void subtract_aal_stats(struct k_atm_aal_stats *from,
109646 struct atm_aal_stats *to)
109647 {
109648-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
109649+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
109650 __AAL_STAT_ITEMS
109651 #undef __HANDLE_ITEM
109652 }
109653diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
109654index 919a5ce..cc6b444 100644
109655--- a/net/ax25/sysctl_net_ax25.c
109656+++ b/net/ax25/sysctl_net_ax25.c
109657@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
109658 {
109659 char path[sizeof("net/ax25/") + IFNAMSIZ];
109660 int k;
109661- struct ctl_table *table;
109662+ ctl_table_no_const *table;
109663
109664 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
109665 if (!table)
109666diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
109667index 00e00e0..710fcd2 100644
109668--- a/net/batman-adv/bat_iv_ogm.c
109669+++ b/net/batman-adv/bat_iv_ogm.c
109670@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
109671
109672 /* randomize initial seqno to avoid collision */
109673 get_random_bytes(&random_seqno, sizeof(random_seqno));
109674- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
109675+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
109676
109677 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
109678 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
109679@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
109680 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
109681
109682 /* change sequence number to network order */
109683- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
109684+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
109685 batadv_ogm_packet->seqno = htonl(seqno);
109686- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
109687+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
109688
109689 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
109690
109691@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
109692 return;
109693
109694 /* could be changed by schedule_own_packet() */
109695- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
109696+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
109697
109698 if (ogm_packet->flags & BATADV_DIRECTLINK)
109699 has_directlink_flag = true;
109700diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
109701index 3d1dcaa..4699f4e 100644
109702--- a/net/batman-adv/fragmentation.c
109703+++ b/net/batman-adv/fragmentation.c
109704@@ -449,7 +449,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
109705 frag_header.packet_type = BATADV_UNICAST_FRAG;
109706 frag_header.version = BATADV_COMPAT_VERSION;
109707 frag_header.ttl = BATADV_TTL;
109708- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
109709+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
109710 frag_header.reserved = 0;
109711 frag_header.no = 0;
109712 frag_header.total_size = htons(skb->len);
109713diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
109714index 5ec31d7..e371631 100644
109715--- a/net/batman-adv/soft-interface.c
109716+++ b/net/batman-adv/soft-interface.c
109717@@ -295,7 +295,7 @@ send:
109718 primary_if->net_dev->dev_addr);
109719
109720 /* set broadcast sequence number */
109721- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
109722+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
109723 bcast_packet->seqno = htonl(seqno);
109724
109725 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
109726@@ -760,7 +760,7 @@ static int batadv_softif_init_late(struct net_device *dev)
109727 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
109728
109729 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
109730- atomic_set(&bat_priv->bcast_seqno, 1);
109731+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
109732 atomic_set(&bat_priv->tt.vn, 0);
109733 atomic_set(&bat_priv->tt.local_changes, 0);
109734 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
109735@@ -774,7 +774,7 @@ static int batadv_softif_init_late(struct net_device *dev)
109736
109737 /* randomize initial seqno to avoid collision */
109738 get_random_bytes(&random_seqno, sizeof(random_seqno));
109739- atomic_set(&bat_priv->frag_seqno, random_seqno);
109740+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
109741
109742 bat_priv->primary_if = NULL;
109743 bat_priv->num_ifaces = 0;
109744@@ -982,7 +982,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
109745 return 0;
109746 }
109747
109748-struct rtnl_link_ops batadv_link_ops __read_mostly = {
109749+struct rtnl_link_ops batadv_link_ops = {
109750 .kind = "batadv",
109751 .priv_size = sizeof(struct batadv_priv),
109752 .setup = batadv_softif_init_early,
109753diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
109754index 9398c3f..0e79657 100644
109755--- a/net/batman-adv/types.h
109756+++ b/net/batman-adv/types.h
109757@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
109758 struct batadv_hard_iface_bat_iv {
109759 unsigned char *ogm_buff;
109760 int ogm_buff_len;
109761- atomic_t ogm_seqno;
109762+ atomic_unchecked_t ogm_seqno;
109763 };
109764
109765 /**
109766@@ -766,7 +766,7 @@ struct batadv_priv {
109767 atomic_t bonding;
109768 atomic_t fragmentation;
109769 atomic_t packet_size_max;
109770- atomic_t frag_seqno;
109771+ atomic_unchecked_t frag_seqno;
109772 #ifdef CONFIG_BATMAN_ADV_BLA
109773 atomic_t bridge_loop_avoidance;
109774 #endif
109775@@ -785,7 +785,7 @@ struct batadv_priv {
109776 #endif
109777 uint32_t isolation_mark;
109778 uint32_t isolation_mark_mask;
109779- atomic_t bcast_seqno;
109780+ atomic_unchecked_t bcast_seqno;
109781 atomic_t bcast_queue_left;
109782 atomic_t batman_queue_left;
109783 char num_ifaces;
109784diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
109785index 1d65c5b..43e55fd 100644
109786--- a/net/bluetooth/hci_sock.c
109787+++ b/net/bluetooth/hci_sock.c
109788@@ -1042,7 +1042,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
109789 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
109790 }
109791
109792- len = min_t(unsigned int, len, sizeof(uf));
109793+ len = min((size_t)len, sizeof(uf));
109794 if (copy_from_user(&uf, optval, len)) {
109795 err = -EFAULT;
109796 break;
109797diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
109798index 6ba33f9..4afc26f 100644
109799--- a/net/bluetooth/l2cap_core.c
109800+++ b/net/bluetooth/l2cap_core.c
109801@@ -3534,8 +3534,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
109802 break;
109803
109804 case L2CAP_CONF_RFC:
109805- if (olen == sizeof(rfc))
109806- memcpy(&rfc, (void *)val, olen);
109807+ if (olen != sizeof(rfc))
109808+ break;
109809+
109810+ memcpy(&rfc, (void *)val, olen);
109811
109812 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
109813 rfc.mode != chan->mode)
109814diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
109815index 60694f0..32623ed 100644
109816--- a/net/bluetooth/l2cap_sock.c
109817+++ b/net/bluetooth/l2cap_sock.c
109818@@ -633,7 +633,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
109819 struct sock *sk = sock->sk;
109820 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
109821 struct l2cap_options opts;
109822- int len, err = 0;
109823+ int err = 0;
109824+ size_t len = optlen;
109825 u32 opt;
109826
109827 BT_DBG("sk %p", sk);
109828@@ -660,7 +661,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
109829 opts.max_tx = chan->max_tx;
109830 opts.txwin_size = chan->tx_win;
109831
109832- len = min_t(unsigned int, sizeof(opts), optlen);
109833+ len = min(sizeof(opts), len);
109834 if (copy_from_user((char *) &opts, optval, len)) {
109835 err = -EFAULT;
109836 break;
109837@@ -747,7 +748,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
109838 struct bt_security sec;
109839 struct bt_power pwr;
109840 struct l2cap_conn *conn;
109841- int len, err = 0;
109842+ int err = 0;
109843+ size_t len = optlen;
109844 u32 opt;
109845
109846 BT_DBG("sk %p", sk);
109847@@ -771,7 +773,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
109848
109849 sec.level = BT_SECURITY_LOW;
109850
109851- len = min_t(unsigned int, sizeof(sec), optlen);
109852+ len = min(sizeof(sec), len);
109853 if (copy_from_user((char *) &sec, optval, len)) {
109854 err = -EFAULT;
109855 break;
109856@@ -867,7 +869,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
109857
109858 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
109859
109860- len = min_t(unsigned int, sizeof(pwr), optlen);
109861+ len = min(sizeof(pwr), len);
109862 if (copy_from_user((char *) &pwr, optval, len)) {
109863 err = -EFAULT;
109864 break;
109865diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
109866index 3c6d2c8..6afc970 100644
109867--- a/net/bluetooth/rfcomm/sock.c
109868+++ b/net/bluetooth/rfcomm/sock.c
109869@@ -686,7 +686,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
109870 struct sock *sk = sock->sk;
109871 struct bt_security sec;
109872 int err = 0;
109873- size_t len;
109874+ size_t len = optlen;
109875 u32 opt;
109876
109877 BT_DBG("sk %p", sk);
109878@@ -708,7 +708,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
109879
109880 sec.level = BT_SECURITY_LOW;
109881
109882- len = min_t(unsigned int, sizeof(sec), optlen);
109883+ len = min(sizeof(sec), len);
109884 if (copy_from_user((char *) &sec, optval, len)) {
109885 err = -EFAULT;
109886 break;
109887diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
109888index 8e385a0..a5bdd8e 100644
109889--- a/net/bluetooth/rfcomm/tty.c
109890+++ b/net/bluetooth/rfcomm/tty.c
109891@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
109892 BT_DBG("tty %p id %d", tty, tty->index);
109893
109894 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
109895- dev->channel, dev->port.count);
109896+ dev->channel, atomic_read(&dev->port.count));
109897
109898 err = tty_port_open(&dev->port, tty, filp);
109899 if (err)
109900@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
109901 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
109902
109903 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
109904- dev->port.count);
109905+ atomic_read(&dev->port.count));
109906
109907 tty_port_close(&dev->port, tty, filp);
109908 }
109909diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
109910index 4fbcea0..69a6786 100644
109911--- a/net/bridge/br_netlink.c
109912+++ b/net/bridge/br_netlink.c
109913@@ -726,7 +726,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
109914 .get_link_af_size = br_get_link_af_size,
109915 };
109916
109917-struct rtnl_link_ops br_link_ops __read_mostly = {
109918+struct rtnl_link_ops br_link_ops = {
109919 .kind = "bridge",
109920 .priv_size = sizeof(struct net_bridge),
109921 .setup = br_dev_setup,
109922diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
109923index 91180a7..1301daa 100644
109924--- a/net/bridge/netfilter/ebtables.c
109925+++ b/net/bridge/netfilter/ebtables.c
109926@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109927 tmp.valid_hooks = t->table->valid_hooks;
109928 }
109929 mutex_unlock(&ebt_mutex);
109930- if (copy_to_user(user, &tmp, *len) != 0) {
109931+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
109932 BUGPRINT("c2u Didn't work\n");
109933 ret = -EFAULT;
109934 break;
109935@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
109936 goto out;
109937 tmp.valid_hooks = t->valid_hooks;
109938
109939- if (copy_to_user(user, &tmp, *len) != 0) {
109940+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
109941 ret = -EFAULT;
109942 break;
109943 }
109944@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
109945 tmp.entries_size = t->table->entries_size;
109946 tmp.valid_hooks = t->table->valid_hooks;
109947
109948- if (copy_to_user(user, &tmp, *len) != 0) {
109949+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
109950 ret = -EFAULT;
109951 break;
109952 }
109953diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
109954index f5afda1..dcf770a 100644
109955--- a/net/caif/cfctrl.c
109956+++ b/net/caif/cfctrl.c
109957@@ -10,6 +10,7 @@
109958 #include <linux/spinlock.h>
109959 #include <linux/slab.h>
109960 #include <linux/pkt_sched.h>
109961+#include <linux/sched.h>
109962 #include <net/caif/caif_layer.h>
109963 #include <net/caif/cfpkt.h>
109964 #include <net/caif/cfctrl.h>
109965@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
109966 memset(&dev_info, 0, sizeof(dev_info));
109967 dev_info.id = 0xff;
109968 cfsrvl_init(&this->serv, 0, &dev_info, false);
109969- atomic_set(&this->req_seq_no, 1);
109970- atomic_set(&this->rsp_seq_no, 1);
109971+ atomic_set_unchecked(&this->req_seq_no, 1);
109972+ atomic_set_unchecked(&this->rsp_seq_no, 1);
109973 this->serv.layer.receive = cfctrl_recv;
109974 sprintf(this->serv.layer.name, "ctrl");
109975 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
109976@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
109977 struct cfctrl_request_info *req)
109978 {
109979 spin_lock_bh(&ctrl->info_list_lock);
109980- atomic_inc(&ctrl->req_seq_no);
109981- req->sequence_no = atomic_read(&ctrl->req_seq_no);
109982+ atomic_inc_unchecked(&ctrl->req_seq_no);
109983+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
109984 list_add_tail(&req->list, &ctrl->list);
109985 spin_unlock_bh(&ctrl->info_list_lock);
109986 }
109987@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
109988 if (p != first)
109989 pr_warn("Requests are not received in order\n");
109990
109991- atomic_set(&ctrl->rsp_seq_no,
109992+ atomic_set_unchecked(&ctrl->rsp_seq_no,
109993 p->sequence_no);
109994 list_del(&p->list);
109995 goto out;
109996diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
109997index 67a4a36..8d28068 100644
109998--- a/net/caif/chnl_net.c
109999+++ b/net/caif/chnl_net.c
110000@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
110001 };
110002
110003
110004-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
110005+static struct rtnl_link_ops ipcaif_link_ops = {
110006 .kind = "caif",
110007 .priv_size = sizeof(struct chnl_net),
110008 .setup = ipcaif_net_setup,
110009diff --git a/net/can/af_can.c b/net/can/af_can.c
110010index 32d710e..93bcf05 100644
110011--- a/net/can/af_can.c
110012+++ b/net/can/af_can.c
110013@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
110014 };
110015
110016 /* notifier block for netdevice event */
110017-static struct notifier_block can_netdev_notifier __read_mostly = {
110018+static struct notifier_block can_netdev_notifier = {
110019 .notifier_call = can_notifier,
110020 };
110021
110022diff --git a/net/can/bcm.c b/net/can/bcm.c
110023index ee9ffd9..dfdf3d4 100644
110024--- a/net/can/bcm.c
110025+++ b/net/can/bcm.c
110026@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
110027 }
110028
110029 /* create /proc/net/can-bcm directory */
110030- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
110031+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
110032 return 0;
110033 }
110034
110035diff --git a/net/can/gw.c b/net/can/gw.c
110036index a6f448e..5902171 100644
110037--- a/net/can/gw.c
110038+++ b/net/can/gw.c
110039@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
110040 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
110041
110042 static HLIST_HEAD(cgw_list);
110043-static struct notifier_block notifier;
110044
110045 static struct kmem_cache *cgw_cache __read_mostly;
110046
110047@@ -948,6 +947,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
110048 return err;
110049 }
110050
110051+static struct notifier_block notifier = {
110052+ .notifier_call = cgw_notifier
110053+};
110054+
110055 static __init int cgw_module_init(void)
110056 {
110057 /* sanitize given module parameter */
110058@@ -963,7 +966,6 @@ static __init int cgw_module_init(void)
110059 return -ENOMEM;
110060
110061 /* set notifier */
110062- notifier.notifier_call = cgw_notifier;
110063 register_netdevice_notifier(&notifier);
110064
110065 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
110066diff --git a/net/can/proc.c b/net/can/proc.c
110067index 1a19b98..df2b4ec 100644
110068--- a/net/can/proc.c
110069+++ b/net/can/proc.c
110070@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
110071 void can_init_proc(void)
110072 {
110073 /* create /proc/net/can directory */
110074- can_dir = proc_mkdir("can", init_net.proc_net);
110075+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
110076
110077 if (!can_dir) {
110078 printk(KERN_INFO "can: failed to create /proc/net/can . "
110079diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
110080index a9f4ae4..ee19b92 100644
110081--- a/net/ceph/messenger.c
110082+++ b/net/ceph/messenger.c
110083@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
110084 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
110085
110086 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
110087-static atomic_t addr_str_seq = ATOMIC_INIT(0);
110088+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
110089
110090 static struct page *zero_page; /* used in certain error cases */
110091
110092@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
110093 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
110094 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
110095
110096- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
110097+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
110098 s = addr_str[i];
110099
110100 switch (ss->ss_family) {
110101diff --git a/net/compat.c b/net/compat.c
110102index f7bd286..76ea56a 100644
110103--- a/net/compat.c
110104+++ b/net/compat.c
110105@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
110106
110107 #define CMSG_COMPAT_FIRSTHDR(msg) \
110108 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
110109- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
110110+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
110111 (struct compat_cmsghdr __user *)NULL)
110112
110113 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
110114 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
110115 (ucmlen) <= (unsigned long) \
110116 ((mhdr)->msg_controllen - \
110117- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
110118+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
110119
110120 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
110121 struct compat_cmsghdr __user *cmsg, int cmsg_len)
110122 {
110123 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
110124- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
110125+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
110126 msg->msg_controllen)
110127 return NULL;
110128 return (struct compat_cmsghdr __user *)ptr;
110129@@ -203,7 +203,7 @@ Efault:
110130
110131 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
110132 {
110133- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
110134+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
110135 struct compat_cmsghdr cmhdr;
110136 struct compat_timeval ctv;
110137 struct compat_timespec cts[3];
110138@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
110139
110140 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
110141 {
110142- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
110143+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
110144 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
110145 int fdnum = scm->fp->count;
110146 struct file **fp = scm->fp->fp;
110147@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
110148 return -EFAULT;
110149 old_fs = get_fs();
110150 set_fs(KERNEL_DS);
110151- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
110152+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
110153 set_fs(old_fs);
110154
110155 return err;
110156@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
110157 len = sizeof(ktime);
110158 old_fs = get_fs();
110159 set_fs(KERNEL_DS);
110160- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
110161+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
110162 set_fs(old_fs);
110163
110164 if (!err) {
110165@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
110166 case MCAST_JOIN_GROUP:
110167 case MCAST_LEAVE_GROUP:
110168 {
110169- struct compat_group_req __user *gr32 = (void *)optval;
110170+ struct compat_group_req __user *gr32 = (void __user *)optval;
110171 struct group_req __user *kgr =
110172 compat_alloc_user_space(sizeof(struct group_req));
110173 u32 interface;
110174@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
110175 case MCAST_BLOCK_SOURCE:
110176 case MCAST_UNBLOCK_SOURCE:
110177 {
110178- struct compat_group_source_req __user *gsr32 = (void *)optval;
110179+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
110180 struct group_source_req __user *kgsr = compat_alloc_user_space(
110181 sizeof(struct group_source_req));
110182 u32 interface;
110183@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
110184 }
110185 case MCAST_MSFILTER:
110186 {
110187- struct compat_group_filter __user *gf32 = (void *)optval;
110188+ struct compat_group_filter __user *gf32 = (void __user *)optval;
110189 struct group_filter __user *kgf;
110190 u32 interface, fmode, numsrc;
110191
110192@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
110193 char __user *optval, int __user *optlen,
110194 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
110195 {
110196- struct compat_group_filter __user *gf32 = (void *)optval;
110197+ struct compat_group_filter __user *gf32 = (void __user *)optval;
110198 struct group_filter __user *kgf;
110199 int __user *koptlen;
110200 u32 interface, fmode, numsrc;
110201@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
110202
110203 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
110204 return -EINVAL;
110205- if (copy_from_user(a, args, nas[call]))
110206+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
110207 return -EFAULT;
110208 a0 = a[0];
110209 a1 = a[1];
110210diff --git a/net/core/datagram.c b/net/core/datagram.c
110211index df493d6..1145766 100644
110212--- a/net/core/datagram.c
110213+++ b/net/core/datagram.c
110214@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
110215 }
110216
110217 kfree_skb(skb);
110218- atomic_inc(&sk->sk_drops);
110219+ atomic_inc_unchecked(&sk->sk_drops);
110220 sk_mem_reclaim_partial(sk);
110221
110222 return err;
110223diff --git a/net/core/dev.c b/net/core/dev.c
110224index 22a53ac..1d19af7 100644
110225--- a/net/core/dev.c
110226+++ b/net/core/dev.c
110227@@ -1681,14 +1681,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
110228 {
110229 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
110230 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
110231- atomic_long_inc(&dev->rx_dropped);
110232+ atomic_long_inc_unchecked(&dev->rx_dropped);
110233 kfree_skb(skb);
110234 return NET_RX_DROP;
110235 }
110236 }
110237
110238 if (unlikely(!is_skb_forwardable(dev, skb))) {
110239- atomic_long_inc(&dev->rx_dropped);
110240+ atomic_long_inc_unchecked(&dev->rx_dropped);
110241 kfree_skb(skb);
110242 return NET_RX_DROP;
110243 }
110244@@ -2987,7 +2987,7 @@ recursion_alert:
110245 drop:
110246 rcu_read_unlock_bh();
110247
110248- atomic_long_inc(&dev->tx_dropped);
110249+ atomic_long_inc_unchecked(&dev->tx_dropped);
110250 kfree_skb_list(skb);
110251 return rc;
110252 out:
110253@@ -3336,7 +3336,7 @@ enqueue:
110254
110255 local_irq_restore(flags);
110256
110257- atomic_long_inc(&skb->dev->rx_dropped);
110258+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
110259 kfree_skb(skb);
110260 return NET_RX_DROP;
110261 }
110262@@ -3413,7 +3413,7 @@ int netif_rx_ni(struct sk_buff *skb)
110263 }
110264 EXPORT_SYMBOL(netif_rx_ni);
110265
110266-static void net_tx_action(struct softirq_action *h)
110267+static __latent_entropy void net_tx_action(void)
110268 {
110269 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
110270
110271@@ -3751,7 +3751,7 @@ ncls:
110272 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
110273 } else {
110274 drop:
110275- atomic_long_inc(&skb->dev->rx_dropped);
110276+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
110277 kfree_skb(skb);
110278 /* Jamal, now you will not able to escape explaining
110279 * me how you were going to use this. :-)
110280@@ -4640,7 +4640,7 @@ out_unlock:
110281 return work;
110282 }
110283
110284-static void net_rx_action(struct softirq_action *h)
110285+static __latent_entropy void net_rx_action(void)
110286 {
110287 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
110288 unsigned long time_limit = jiffies + 2;
110289@@ -6676,8 +6676,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
110290 } else {
110291 netdev_stats_to_stats64(storage, &dev->stats);
110292 }
110293- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
110294- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
110295+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
110296+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
110297 return storage;
110298 }
110299 EXPORT_SYMBOL(dev_get_stats);
110300diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
110301index b94b1d2..da3ed7c 100644
110302--- a/net/core/dev_ioctl.c
110303+++ b/net/core/dev_ioctl.c
110304@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
110305 no_module = !dev;
110306 if (no_module && capable(CAP_NET_ADMIN))
110307 no_module = request_module("netdev-%s", name);
110308- if (no_module && capable(CAP_SYS_MODULE))
110309+ if (no_module && capable(CAP_SYS_MODULE)) {
110310+#ifdef CONFIG_GRKERNSEC_MODHARDEN
110311+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
110312+#else
110313 request_module("%s", name);
110314+#endif
110315+ }
110316 }
110317 EXPORT_SYMBOL(dev_load);
110318
110319diff --git a/net/core/filter.c b/net/core/filter.c
110320index f6bdc2b..76eba8e 100644
110321--- a/net/core/filter.c
110322+++ b/net/core/filter.c
110323@@ -533,7 +533,11 @@ do_pass:
110324
110325 /* Unknown instruction. */
110326 default:
110327- goto err;
110328+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
110329+ fp->code, fp->jt, fp->jf, fp->k);
110330+ kfree(addrs);
110331+ BUG();
110332+ return -EINVAL;
110333 }
110334
110335 insn++;
110336@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
110337 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
110338 int pc, ret = 0;
110339
110340- BUILD_BUG_ON(BPF_MEMWORDS > 16);
110341+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
110342
110343 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
110344 if (!masks)
110345@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
110346 if (!fp)
110347 return -ENOMEM;
110348
110349- memcpy(fp->insns, fprog->filter, fsize);
110350+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
110351
110352 fp->len = fprog->len;
110353 /* Since unattached filters are not copied back to user
110354diff --git a/net/core/flow.c b/net/core/flow.c
110355index 1033725..340f65d 100644
110356--- a/net/core/flow.c
110357+++ b/net/core/flow.c
110358@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
110359 static int flow_entry_valid(struct flow_cache_entry *fle,
110360 struct netns_xfrm *xfrm)
110361 {
110362- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
110363+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
110364 return 0;
110365 if (fle->object && !fle->object->ops->check(fle->object))
110366 return 0;
110367@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
110368 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
110369 fcp->hash_count++;
110370 }
110371- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
110372+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
110373 flo = fle->object;
110374 if (!flo)
110375 goto ret_object;
110376@@ -263,7 +263,7 @@ nocache:
110377 }
110378 flo = resolver(net, key, family, dir, flo, ctx);
110379 if (fle) {
110380- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
110381+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
110382 if (!IS_ERR(flo))
110383 fle->object = flo;
110384 else
110385diff --git a/net/core/neighbour.c b/net/core/neighbour.c
110386index 70fe9e1..926784c 100644
110387--- a/net/core/neighbour.c
110388+++ b/net/core/neighbour.c
110389@@ -2806,7 +2806,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
110390 void __user *buffer, size_t *lenp, loff_t *ppos)
110391 {
110392 int size, ret;
110393- struct ctl_table tmp = *ctl;
110394+ ctl_table_no_const tmp = *ctl;
110395
110396 tmp.extra1 = &zero;
110397 tmp.extra2 = &unres_qlen_max;
110398@@ -2868,7 +2868,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
110399 void __user *buffer,
110400 size_t *lenp, loff_t *ppos)
110401 {
110402- struct ctl_table tmp = *ctl;
110403+ ctl_table_no_const tmp = *ctl;
110404 int ret;
110405
110406 tmp.extra1 = &zero;
110407diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
110408index 2bf8329..2eb1423 100644
110409--- a/net/core/net-procfs.c
110410+++ b/net/core/net-procfs.c
110411@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
110412 struct rtnl_link_stats64 temp;
110413 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
110414
110415- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
110416+ if (gr_proc_is_restricted())
110417+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
110418+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
110419+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
110420+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
110421+ else
110422+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
110423 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
110424 dev->name, stats->rx_bytes, stats->rx_packets,
110425 stats->rx_errors,
110426@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
110427 return 0;
110428 }
110429
110430-static const struct seq_operations dev_seq_ops = {
110431+const struct seq_operations dev_seq_ops = {
110432 .start = dev_seq_start,
110433 .next = dev_seq_next,
110434 .stop = dev_seq_stop,
110435@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
110436
110437 static int softnet_seq_open(struct inode *inode, struct file *file)
110438 {
110439- return seq_open(file, &softnet_seq_ops);
110440+ return seq_open_restrict(file, &softnet_seq_ops);
110441 }
110442
110443 static const struct file_operations softnet_seq_fops = {
110444@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
110445 else
110446 seq_printf(seq, "%04x", ntohs(pt->type));
110447
110448+#ifdef CONFIG_GRKERNSEC_HIDESYM
110449+ seq_printf(seq, " %-8s %pf\n",
110450+ pt->dev ? pt->dev->name : "", NULL);
110451+#else
110452 seq_printf(seq, " %-8s %pf\n",
110453 pt->dev ? pt->dev->name : "", pt->func);
110454+#endif
110455 }
110456
110457 return 0;
110458diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
110459index f2aa73b..0d1a1ea 100644
110460--- a/net/core/net-sysfs.c
110461+++ b/net/core/net-sysfs.c
110462@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
110463 {
110464 struct net_device *netdev = to_net_dev(dev);
110465 return sprintf(buf, fmt_dec,
110466- atomic_read(&netdev->carrier_changes));
110467+ atomic_read_unchecked(&netdev->carrier_changes));
110468 }
110469 static DEVICE_ATTR_RO(carrier_changes);
110470
110471diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
110472index 70d3450..eb7c528 100644
110473--- a/net/core/net_namespace.c
110474+++ b/net/core/net_namespace.c
110475@@ -663,7 +663,7 @@ static int __register_pernet_operations(struct list_head *list,
110476 int error;
110477 LIST_HEAD(net_exit_list);
110478
110479- list_add_tail(&ops->list, list);
110480+ pax_list_add_tail((struct list_head *)&ops->list, list);
110481 if (ops->init || (ops->id && ops->size)) {
110482 for_each_net(net) {
110483 error = ops_init(ops, net);
110484@@ -676,7 +676,7 @@ static int __register_pernet_operations(struct list_head *list,
110485
110486 out_undo:
110487 /* If I have an error cleanup all namespaces I initialized */
110488- list_del(&ops->list);
110489+ pax_list_del((struct list_head *)&ops->list);
110490 ops_exit_list(ops, &net_exit_list);
110491 ops_free_list(ops, &net_exit_list);
110492 return error;
110493@@ -687,7 +687,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
110494 struct net *net;
110495 LIST_HEAD(net_exit_list);
110496
110497- list_del(&ops->list);
110498+ pax_list_del((struct list_head *)&ops->list);
110499 for_each_net(net)
110500 list_add_tail(&net->exit_list, &net_exit_list);
110501 ops_exit_list(ops, &net_exit_list);
110502@@ -821,7 +821,7 @@ int register_pernet_device(struct pernet_operations *ops)
110503 mutex_lock(&net_mutex);
110504 error = register_pernet_operations(&pernet_list, ops);
110505 if (!error && (first_device == &pernet_list))
110506- first_device = &ops->list;
110507+ first_device = (struct list_head *)&ops->list;
110508 mutex_unlock(&net_mutex);
110509 return error;
110510 }
110511diff --git a/net/core/netpoll.c b/net/core/netpoll.c
110512index c126a87..10ad89d 100644
110513--- a/net/core/netpoll.c
110514+++ b/net/core/netpoll.c
110515@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
110516 struct udphdr *udph;
110517 struct iphdr *iph;
110518 struct ethhdr *eth;
110519- static atomic_t ip_ident;
110520+ static atomic_unchecked_t ip_ident;
110521 struct ipv6hdr *ip6h;
110522
110523 udp_len = len + sizeof(*udph);
110524@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
110525 put_unaligned(0x45, (unsigned char *)iph);
110526 iph->tos = 0;
110527 put_unaligned(htons(ip_len), &(iph->tot_len));
110528- iph->id = htons(atomic_inc_return(&ip_ident));
110529+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
110530 iph->frag_off = 0;
110531 iph->ttl = 64;
110532 iph->protocol = IPPROTO_UDP;
110533diff --git a/net/core/pktgen.c b/net/core/pktgen.c
110534index 508155b..fad080f 100644
110535--- a/net/core/pktgen.c
110536+++ b/net/core/pktgen.c
110537@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
110538 pn->net = net;
110539 INIT_LIST_HEAD(&pn->pktgen_threads);
110540 pn->pktgen_exiting = false;
110541- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
110542+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
110543 if (!pn->proc_dir) {
110544 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
110545 return -ENODEV;
110546diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
110547index 7ebed55..378bf34 100644
110548--- a/net/core/rtnetlink.c
110549+++ b/net/core/rtnetlink.c
110550@@ -61,7 +61,7 @@ struct rtnl_link {
110551 rtnl_doit_func doit;
110552 rtnl_dumpit_func dumpit;
110553 rtnl_calcit_func calcit;
110554-};
110555+} __no_const;
110556
110557 static DEFINE_MUTEX(rtnl_mutex);
110558
110559@@ -307,10 +307,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
110560 * to use the ops for creating device. So do not
110561 * fill up dellink as well. That disables rtnl_dellink.
110562 */
110563- if (ops->setup && !ops->dellink)
110564- ops->dellink = unregister_netdevice_queue;
110565+ if (ops->setup && !ops->dellink) {
110566+ pax_open_kernel();
110567+ *(void **)&ops->dellink = unregister_netdevice_queue;
110568+ pax_close_kernel();
110569+ }
110570
110571- list_add_tail(&ops->list, &link_ops);
110572+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
110573 return 0;
110574 }
110575 EXPORT_SYMBOL_GPL(__rtnl_link_register);
110576@@ -357,7 +360,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
110577 for_each_net(net) {
110578 __rtnl_kill_links(net, ops);
110579 }
110580- list_del(&ops->list);
110581+ pax_list_del((struct list_head *)&ops->list);
110582 }
110583 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
110584
110585@@ -1047,7 +1050,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
110586 (dev->ifalias &&
110587 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
110588 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
110589- atomic_read(&dev->carrier_changes)))
110590+ atomic_read_unchecked(&dev->carrier_changes)))
110591 goto nla_put_failure;
110592
110593 if (1) {
110594diff --git a/net/core/scm.c b/net/core/scm.c
110595index 3b6899b..cf36238 100644
110596--- a/net/core/scm.c
110597+++ b/net/core/scm.c
110598@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
110599 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
110600 {
110601 struct cmsghdr __user *cm
110602- = (__force struct cmsghdr __user *)msg->msg_control;
110603+ = (struct cmsghdr __force_user *)msg->msg_control;
110604 struct cmsghdr cmhdr;
110605 int cmlen = CMSG_LEN(len);
110606 int err;
110607@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
110608 err = -EFAULT;
110609 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
110610 goto out;
110611- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
110612+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
110613 goto out;
110614 cmlen = CMSG_SPACE(len);
110615 if (msg->msg_controllen < cmlen)
110616@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
110617 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
110618 {
110619 struct cmsghdr __user *cm
110620- = (__force struct cmsghdr __user*)msg->msg_control;
110621+ = (struct cmsghdr __force_user *)msg->msg_control;
110622
110623 int fdmax = 0;
110624 int fdnum = scm->fp->count;
110625@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
110626 if (fdnum < fdmax)
110627 fdmax = fdnum;
110628
110629- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
110630+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
110631 i++, cmfptr++)
110632 {
110633 struct socket *sock;
110634diff --git a/net/core/skbuff.c b/net/core/skbuff.c
110635index e9f9a15..6eb024e 100644
110636--- a/net/core/skbuff.c
110637+++ b/net/core/skbuff.c
110638@@ -2139,7 +2139,7 @@ EXPORT_SYMBOL(__skb_checksum);
110639 __wsum skb_checksum(const struct sk_buff *skb, int offset,
110640 int len, __wsum csum)
110641 {
110642- const struct skb_checksum_ops ops = {
110643+ static const struct skb_checksum_ops ops = {
110644 .update = csum_partial_ext,
110645 .combine = csum_block_add_ext,
110646 };
110647@@ -3379,12 +3379,14 @@ void __init skb_init(void)
110648 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
110649 sizeof(struct sk_buff),
110650 0,
110651- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
110652+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
110653+ SLAB_NO_SANITIZE,
110654 NULL);
110655 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
110656 sizeof(struct sk_buff_fclones),
110657 0,
110658- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
110659+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
110660+ SLAB_NO_SANITIZE,
110661 NULL);
110662 }
110663
110664diff --git a/net/core/sock.c b/net/core/sock.c
110665index 71e3e5f..ab90920 100644
110666--- a/net/core/sock.c
110667+++ b/net/core/sock.c
110668@@ -443,7 +443,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
110669 struct sk_buff_head *list = &sk->sk_receive_queue;
110670
110671 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
110672- atomic_inc(&sk->sk_drops);
110673+ atomic_inc_unchecked(&sk->sk_drops);
110674 trace_sock_rcvqueue_full(sk, skb);
110675 return -ENOMEM;
110676 }
110677@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
110678 return err;
110679
110680 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
110681- atomic_inc(&sk->sk_drops);
110682+ atomic_inc_unchecked(&sk->sk_drops);
110683 return -ENOBUFS;
110684 }
110685
110686@@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
110687 skb_dst_force(skb);
110688
110689 spin_lock_irqsave(&list->lock, flags);
110690- skb->dropcount = atomic_read(&sk->sk_drops);
110691+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
110692 __skb_queue_tail(list, skb);
110693 spin_unlock_irqrestore(&list->lock, flags);
110694
110695@@ -486,7 +486,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
110696 skb->dev = NULL;
110697
110698 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
110699- atomic_inc(&sk->sk_drops);
110700+ atomic_inc_unchecked(&sk->sk_drops);
110701 goto discard_and_relse;
110702 }
110703 if (nested)
110704@@ -504,7 +504,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
110705 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
110706 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
110707 bh_unlock_sock(sk);
110708- atomic_inc(&sk->sk_drops);
110709+ atomic_inc_unchecked(&sk->sk_drops);
110710 goto discard_and_relse;
110711 }
110712
110713@@ -910,6 +910,7 @@ set_rcvbuf:
110714 }
110715 break;
110716
110717+#ifndef GRKERNSEC_BPF_HARDEN
110718 case SO_ATTACH_BPF:
110719 ret = -EINVAL;
110720 if (optlen == sizeof(u32)) {
110721@@ -922,7 +923,7 @@ set_rcvbuf:
110722 ret = sk_attach_bpf(ufd, sk);
110723 }
110724 break;
110725-
110726+#endif
110727 case SO_DETACH_FILTER:
110728 ret = sk_detach_filter(sk);
110729 break;
110730@@ -1026,12 +1027,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
110731 struct timeval tm;
110732 } v;
110733
110734- int lv = sizeof(int);
110735- int len;
110736+ unsigned int lv = sizeof(int);
110737+ unsigned int len;
110738
110739 if (get_user(len, optlen))
110740 return -EFAULT;
110741- if (len < 0)
110742+ if (len > INT_MAX)
110743 return -EINVAL;
110744
110745 memset(&v, 0, sizeof(v));
110746@@ -1169,11 +1170,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
110747
110748 case SO_PEERNAME:
110749 {
110750- char address[128];
110751+ char address[_K_SS_MAXSIZE];
110752
110753 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
110754 return -ENOTCONN;
110755- if (lv < len)
110756+ if (lv < len || sizeof address < len)
110757 return -EINVAL;
110758 if (copy_to_user(optval, address, len))
110759 return -EFAULT;
110760@@ -1258,7 +1259,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
110761
110762 if (len > lv)
110763 len = lv;
110764- if (copy_to_user(optval, &v, len))
110765+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
110766 return -EFAULT;
110767 lenout:
110768 if (put_user(len, optlen))
110769@@ -2375,7 +2376,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
110770 */
110771 smp_wmb();
110772 atomic_set(&sk->sk_refcnt, 1);
110773- atomic_set(&sk->sk_drops, 0);
110774+ atomic_set_unchecked(&sk->sk_drops, 0);
110775 }
110776 EXPORT_SYMBOL(sock_init_data);
110777
110778@@ -2503,6 +2504,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
110779 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
110780 int level, int type)
110781 {
110782+ struct sock_extended_err ee;
110783 struct sock_exterr_skb *serr;
110784 struct sk_buff *skb;
110785 int copied, err;
110786@@ -2524,7 +2526,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
110787 sock_recv_timestamp(msg, sk, skb);
110788
110789 serr = SKB_EXT_ERR(skb);
110790- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
110791+ ee = serr->ee;
110792+ put_cmsg(msg, level, type, sizeof ee, &ee);
110793
110794 msg->msg_flags |= MSG_ERRQUEUE;
110795 err = copied;
110796diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
110797index ad704c7..ca48aff 100644
110798--- a/net/core/sock_diag.c
110799+++ b/net/core/sock_diag.c
110800@@ -9,26 +9,33 @@
110801 #include <linux/inet_diag.h>
110802 #include <linux/sock_diag.h>
110803
110804-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
110805+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
110806 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
110807 static DEFINE_MUTEX(sock_diag_table_mutex);
110808
110809 int sock_diag_check_cookie(void *sk, __u32 *cookie)
110810 {
110811+#ifndef CONFIG_GRKERNSEC_HIDESYM
110812 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
110813 cookie[1] != INET_DIAG_NOCOOKIE) &&
110814 ((u32)(unsigned long)sk != cookie[0] ||
110815 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
110816 return -ESTALE;
110817 else
110818+#endif
110819 return 0;
110820 }
110821 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
110822
110823 void sock_diag_save_cookie(void *sk, __u32 *cookie)
110824 {
110825+#ifdef CONFIG_GRKERNSEC_HIDESYM
110826+ cookie[0] = 0;
110827+ cookie[1] = 0;
110828+#else
110829 cookie[0] = (u32)(unsigned long)sk;
110830 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
110831+#endif
110832 }
110833 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
110834
110835@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
110836 mutex_lock(&sock_diag_table_mutex);
110837 if (sock_diag_handlers[hndl->family])
110838 err = -EBUSY;
110839- else
110840+ else {
110841+ pax_open_kernel();
110842 sock_diag_handlers[hndl->family] = hndl;
110843+ pax_close_kernel();
110844+ }
110845 mutex_unlock(&sock_diag_table_mutex);
110846
110847 return err;
110848@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
110849
110850 mutex_lock(&sock_diag_table_mutex);
110851 BUG_ON(sock_diag_handlers[family] != hnld);
110852+ pax_open_kernel();
110853 sock_diag_handlers[family] = NULL;
110854+ pax_close_kernel();
110855 mutex_unlock(&sock_diag_table_mutex);
110856 }
110857 EXPORT_SYMBOL_GPL(sock_diag_unregister);
110858diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
110859index 8ce351f..2c388f7 100644
110860--- a/net/core/sysctl_net_core.c
110861+++ b/net/core/sysctl_net_core.c
110862@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
110863 {
110864 unsigned int orig_size, size;
110865 int ret, i;
110866- struct ctl_table tmp = {
110867+ ctl_table_no_const tmp = {
110868 .data = &size,
110869 .maxlen = sizeof(size),
110870 .mode = table->mode
110871@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
110872 void __user *buffer, size_t *lenp, loff_t *ppos)
110873 {
110874 char id[IFNAMSIZ];
110875- struct ctl_table tbl = {
110876+ ctl_table_no_const tbl = {
110877 .data = id,
110878 .maxlen = IFNAMSIZ,
110879 };
110880@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
110881 static int proc_do_rss_key(struct ctl_table *table, int write,
110882 void __user *buffer, size_t *lenp, loff_t *ppos)
110883 {
110884- struct ctl_table fake_table;
110885+ ctl_table_no_const fake_table;
110886 char buf[NETDEV_RSS_KEY_LEN * 3];
110887
110888 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
110889@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
110890 .mode = 0444,
110891 .proc_handler = proc_do_rss_key,
110892 },
110893-#ifdef CONFIG_BPF_JIT
110894+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
110895 {
110896 .procname = "bpf_jit_enable",
110897 .data = &bpf_jit_enable,
110898@@ -411,13 +411,12 @@ static struct ctl_table netns_core_table[] = {
110899
110900 static __net_init int sysctl_core_net_init(struct net *net)
110901 {
110902- struct ctl_table *tbl;
110903+ ctl_table_no_const *tbl = NULL;
110904
110905 net->core.sysctl_somaxconn = SOMAXCONN;
110906
110907- tbl = netns_core_table;
110908 if (!net_eq(net, &init_net)) {
110909- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
110910+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
110911 if (tbl == NULL)
110912 goto err_dup;
110913
110914@@ -427,17 +426,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
110915 if (net->user_ns != &init_user_ns) {
110916 tbl[0].procname = NULL;
110917 }
110918- }
110919-
110920- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
110921+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
110922+ } else
110923+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
110924 if (net->core.sysctl_hdr == NULL)
110925 goto err_reg;
110926
110927 return 0;
110928
110929 err_reg:
110930- if (tbl != netns_core_table)
110931- kfree(tbl);
110932+ kfree(tbl);
110933 err_dup:
110934 return -ENOMEM;
110935 }
110936@@ -452,7 +450,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
110937 kfree(tbl);
110938 }
110939
110940-static __net_initdata struct pernet_operations sysctl_core_ops = {
110941+static __net_initconst struct pernet_operations sysctl_core_ops = {
110942 .init = sysctl_core_net_init,
110943 .exit = sysctl_core_net_exit,
110944 };
110945diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
110946index 8102286..a0c2755 100644
110947--- a/net/decnet/af_decnet.c
110948+++ b/net/decnet/af_decnet.c
110949@@ -466,6 +466,7 @@ static struct proto dn_proto = {
110950 .sysctl_rmem = sysctl_decnet_rmem,
110951 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
110952 .obj_size = sizeof(struct dn_sock),
110953+ .slab_flags = SLAB_USERCOPY,
110954 };
110955
110956 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
110957diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
110958index b2c26b0..41f803e 100644
110959--- a/net/decnet/dn_dev.c
110960+++ b/net/decnet/dn_dev.c
110961@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
110962 .extra1 = &min_t3,
110963 .extra2 = &max_t3
110964 },
110965- {0}
110966+ { }
110967 },
110968 };
110969
110970diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
110971index 5325b54..a0d4d69 100644
110972--- a/net/decnet/sysctl_net_decnet.c
110973+++ b/net/decnet/sysctl_net_decnet.c
110974@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
110975
110976 if (len > *lenp) len = *lenp;
110977
110978- if (copy_to_user(buffer, addr, len))
110979+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
110980 return -EFAULT;
110981
110982 *lenp = len;
110983@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
110984
110985 if (len > *lenp) len = *lenp;
110986
110987- if (copy_to_user(buffer, devname, len))
110988+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
110989 return -EFAULT;
110990
110991 *lenp = len;
110992diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
110993index a2c7e4c..3dc9f67 100644
110994--- a/net/hsr/hsr_netlink.c
110995+++ b/net/hsr/hsr_netlink.c
110996@@ -102,7 +102,7 @@ nla_put_failure:
110997 return -EMSGSIZE;
110998 }
110999
111000-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
111001+static struct rtnl_link_ops hsr_link_ops = {
111002 .kind = "hsr",
111003 .maxtype = IFLA_HSR_MAX,
111004 .policy = hsr_policy,
111005diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
111006index 055fbb7..c0dbe60 100644
111007--- a/net/ieee802154/6lowpan/core.c
111008+++ b/net/ieee802154/6lowpan/core.c
111009@@ -217,7 +217,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
111010 dev_put(real_dev);
111011 }
111012
111013-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
111014+static struct rtnl_link_ops lowpan_link_ops = {
111015 .kind = "lowpan",
111016 .priv_size = sizeof(struct lowpan_dev_info),
111017 .setup = lowpan_setup,
111018diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
111019index f46e4d1..30231f1 100644
111020--- a/net/ieee802154/6lowpan/reassembly.c
111021+++ b/net/ieee802154/6lowpan/reassembly.c
111022@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
111023
111024 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
111025 {
111026- struct ctl_table *table;
111027+ ctl_table_no_const *table = NULL;
111028 struct ctl_table_header *hdr;
111029 struct netns_ieee802154_lowpan *ieee802154_lowpan =
111030 net_ieee802154_lowpan(net);
111031
111032- table = lowpan_frags_ns_ctl_table;
111033 if (!net_eq(net, &init_net)) {
111034- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
111035+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
111036 GFP_KERNEL);
111037 if (table == NULL)
111038 goto err_alloc;
111039@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
111040 /* Don't export sysctls to unprivileged users */
111041 if (net->user_ns != &init_user_ns)
111042 table[0].procname = NULL;
111043- }
111044-
111045- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
111046+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
111047+ } else
111048+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
111049 if (hdr == NULL)
111050 goto err_reg;
111051
111052@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
111053 return 0;
111054
111055 err_reg:
111056- if (!net_eq(net, &init_net))
111057- kfree(table);
111058+ kfree(table);
111059 err_alloc:
111060 return -ENOMEM;
111061 }
111062diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
111063index d2e49ba..f78e8aa 100644
111064--- a/net/ipv4/af_inet.c
111065+++ b/net/ipv4/af_inet.c
111066@@ -1390,7 +1390,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
111067 return ip_recv_error(sk, msg, len, addr_len);
111068 #if IS_ENABLED(CONFIG_IPV6)
111069 if (sk->sk_family == AF_INET6)
111070- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
111071+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
111072 #endif
111073 return -EINVAL;
111074 }
111075diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
111076index 3a8985c..9d2a870 100644
111077--- a/net/ipv4/devinet.c
111078+++ b/net/ipv4/devinet.c
111079@@ -69,7 +69,8 @@
111080
111081 static struct ipv4_devconf ipv4_devconf = {
111082 .data = {
111083- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
111084+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
111085+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
111086 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
111087 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
111088 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
111089@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
111090
111091 static struct ipv4_devconf ipv4_devconf_dflt = {
111092 .data = {
111093- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
111094+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
111095+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
111096 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
111097 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
111098 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
111099@@ -1549,7 +1551,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
111100 idx = 0;
111101 head = &net->dev_index_head[h];
111102 rcu_read_lock();
111103- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
111104+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
111105 net->dev_base_seq;
111106 hlist_for_each_entry_rcu(dev, head, index_hlist) {
111107 if (idx < s_idx)
111108@@ -1868,7 +1870,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
111109 idx = 0;
111110 head = &net->dev_index_head[h];
111111 rcu_read_lock();
111112- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
111113+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
111114 net->dev_base_seq;
111115 hlist_for_each_entry_rcu(dev, head, index_hlist) {
111116 if (idx < s_idx)
111117@@ -2103,7 +2105,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
111118 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
111119 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
111120
111121-static struct devinet_sysctl_table {
111122+static const struct devinet_sysctl_table {
111123 struct ctl_table_header *sysctl_header;
111124 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
111125 } devinet_sysctl = {
111126@@ -2235,7 +2237,7 @@ static __net_init int devinet_init_net(struct net *net)
111127 int err;
111128 struct ipv4_devconf *all, *dflt;
111129 #ifdef CONFIG_SYSCTL
111130- struct ctl_table *tbl = ctl_forward_entry;
111131+ ctl_table_no_const *tbl = NULL;
111132 struct ctl_table_header *forw_hdr;
111133 #endif
111134
111135@@ -2253,7 +2255,7 @@ static __net_init int devinet_init_net(struct net *net)
111136 goto err_alloc_dflt;
111137
111138 #ifdef CONFIG_SYSCTL
111139- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
111140+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
111141 if (tbl == NULL)
111142 goto err_alloc_ctl;
111143
111144@@ -2273,7 +2275,10 @@ static __net_init int devinet_init_net(struct net *net)
111145 goto err_reg_dflt;
111146
111147 err = -ENOMEM;
111148- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
111149+ if (!net_eq(net, &init_net))
111150+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
111151+ else
111152+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
111153 if (forw_hdr == NULL)
111154 goto err_reg_ctl;
111155 net->ipv4.forw_hdr = forw_hdr;
111156@@ -2289,8 +2294,7 @@ err_reg_ctl:
111157 err_reg_dflt:
111158 __devinet_sysctl_unregister(all);
111159 err_reg_all:
111160- if (tbl != ctl_forward_entry)
111161- kfree(tbl);
111162+ kfree(tbl);
111163 err_alloc_ctl:
111164 #endif
111165 if (dflt != &ipv4_devconf_dflt)
111166diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
111167index 23b9b3e..60cf0c4 100644
111168--- a/net/ipv4/fib_frontend.c
111169+++ b/net/ipv4/fib_frontend.c
111170@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
111171 #ifdef CONFIG_IP_ROUTE_MULTIPATH
111172 fib_sync_up(dev);
111173 #endif
111174- atomic_inc(&net->ipv4.dev_addr_genid);
111175+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
111176 rt_cache_flush(dev_net(dev));
111177 break;
111178 case NETDEV_DOWN:
111179 fib_del_ifaddr(ifa, NULL);
111180- atomic_inc(&net->ipv4.dev_addr_genid);
111181+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
111182 if (ifa->ifa_dev->ifa_list == NULL) {
111183 /* Last address was deleted from this interface.
111184 * Disable IP.
111185@@ -1063,7 +1063,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
111186 #ifdef CONFIG_IP_ROUTE_MULTIPATH
111187 fib_sync_up(dev);
111188 #endif
111189- atomic_inc(&net->ipv4.dev_addr_genid);
111190+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
111191 rt_cache_flush(net);
111192 break;
111193 case NETDEV_DOWN:
111194diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
111195index 1e2090e..351a724 100644
111196--- a/net/ipv4/fib_semantics.c
111197+++ b/net/ipv4/fib_semantics.c
111198@@ -753,7 +753,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
111199 nh->nh_saddr = inet_select_addr(nh->nh_dev,
111200 nh->nh_gw,
111201 nh->nh_parent->fib_scope);
111202- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
111203+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
111204
111205 return nh->nh_saddr;
111206 }
111207diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
111208index ff069f6..335e752 100644
111209--- a/net/ipv4/fou.c
111210+++ b/net/ipv4/fou.c
111211@@ -771,12 +771,12 @@ EXPORT_SYMBOL(gue_build_header);
111212
111213 #ifdef CONFIG_NET_FOU_IP_TUNNELS
111214
111215-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
111216+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
111217 .encap_hlen = fou_encap_hlen,
111218 .build_header = fou_build_header,
111219 };
111220
111221-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
111222+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
111223 .encap_hlen = gue_encap_hlen,
111224 .build_header = gue_build_header,
111225 };
111226diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
111227index 9111a4e..3576905 100644
111228--- a/net/ipv4/inet_hashtables.c
111229+++ b/net/ipv4/inet_hashtables.c
111230@@ -18,6 +18,7 @@
111231 #include <linux/sched.h>
111232 #include <linux/slab.h>
111233 #include <linux/wait.h>
111234+#include <linux/security.h>
111235
111236 #include <net/inet_connection_sock.h>
111237 #include <net/inet_hashtables.h>
111238@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
111239 return inet_ehashfn(net, laddr, lport, faddr, fport);
111240 }
111241
111242+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
111243+
111244 /*
111245 * Allocate and initialize a new local port bind bucket.
111246 * The bindhash mutex for snum's hash chain must be held here.
111247@@ -554,6 +557,8 @@ ok:
111248 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
111249 spin_unlock(&head->lock);
111250
111251+ gr_update_task_in_ip_table(inet_sk(sk));
111252+
111253 if (tw) {
111254 inet_twsk_deschedule(tw, death_row);
111255 while (twrefcnt) {
111256diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
111257index 241afd7..31b95d5 100644
111258--- a/net/ipv4/inetpeer.c
111259+++ b/net/ipv4/inetpeer.c
111260@@ -461,7 +461,7 @@ relookup:
111261 if (p) {
111262 p->daddr = *daddr;
111263 atomic_set(&p->refcnt, 1);
111264- atomic_set(&p->rid, 0);
111265+ atomic_set_unchecked(&p->rid, 0);
111266 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
111267 p->rate_tokens = 0;
111268 /* 60*HZ is arbitrary, but chosen enough high so that the first
111269diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
111270index 145a50c..5dd8cc5 100644
111271--- a/net/ipv4/ip_fragment.c
111272+++ b/net/ipv4/ip_fragment.c
111273@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
111274 return 0;
111275
111276 start = qp->rid;
111277- end = atomic_inc_return(&peer->rid);
111278+ end = atomic_inc_return_unchecked(&peer->rid);
111279 qp->rid = end;
111280
111281 rc = qp->q.fragments && (end - start) > max;
111282@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
111283
111284 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
111285 {
111286- struct ctl_table *table;
111287+ ctl_table_no_const *table = NULL;
111288 struct ctl_table_header *hdr;
111289
111290- table = ip4_frags_ns_ctl_table;
111291 if (!net_eq(net, &init_net)) {
111292- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
111293+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
111294 if (table == NULL)
111295 goto err_alloc;
111296
111297@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
111298 /* Don't export sysctls to unprivileged users */
111299 if (net->user_ns != &init_user_ns)
111300 table[0].procname = NULL;
111301- }
111302+ hdr = register_net_sysctl(net, "net/ipv4", table);
111303+ } else
111304+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
111305
111306- hdr = register_net_sysctl(net, "net/ipv4", table);
111307 if (hdr == NULL)
111308 goto err_reg;
111309
111310@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
111311 return 0;
111312
111313 err_reg:
111314- if (!net_eq(net, &init_net))
111315- kfree(table);
111316+ kfree(table);
111317 err_alloc:
111318 return -ENOMEM;
111319 }
111320diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
111321index 6207275f..00323a2 100644
111322--- a/net/ipv4/ip_gre.c
111323+++ b/net/ipv4/ip_gre.c
111324@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
111325 module_param(log_ecn_error, bool, 0644);
111326 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
111327
111328-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
111329+static struct rtnl_link_ops ipgre_link_ops;
111330 static int ipgre_tunnel_init(struct net_device *dev);
111331
111332 static int ipgre_net_id __read_mostly;
111333@@ -817,7 +817,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
111334 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
111335 };
111336
111337-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
111338+static struct rtnl_link_ops ipgre_link_ops = {
111339 .kind = "gre",
111340 .maxtype = IFLA_GRE_MAX,
111341 .policy = ipgre_policy,
111342@@ -832,7 +832,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
111343 .get_link_net = ip_tunnel_get_link_net,
111344 };
111345
111346-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
111347+static struct rtnl_link_ops ipgre_tap_ops = {
111348 .kind = "gretap",
111349 .maxtype = IFLA_GRE_MAX,
111350 .policy = ipgre_policy,
111351diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
111352index 3d4da2c..40f9c29 100644
111353--- a/net/ipv4/ip_input.c
111354+++ b/net/ipv4/ip_input.c
111355@@ -147,6 +147,10 @@
111356 #include <linux/mroute.h>
111357 #include <linux/netlink.h>
111358
111359+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111360+extern int grsec_enable_blackhole;
111361+#endif
111362+
111363 /*
111364 * Process Router Attention IP option (RFC 2113)
111365 */
111366@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
111367 if (!raw) {
111368 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
111369 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
111370+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111371+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
111372+#endif
111373 icmp_send(skb, ICMP_DEST_UNREACH,
111374 ICMP_PROT_UNREACH, 0);
111375 }
111376diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
111377index 5cd9927..8610b9f 100644
111378--- a/net/ipv4/ip_sockglue.c
111379+++ b/net/ipv4/ip_sockglue.c
111380@@ -1254,7 +1254,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
111381 len = min_t(unsigned int, len, opt->optlen);
111382 if (put_user(len, optlen))
111383 return -EFAULT;
111384- if (copy_to_user(optval, opt->__data, len))
111385+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
111386+ copy_to_user(optval, opt->__data, len))
111387 return -EFAULT;
111388 return 0;
111389 }
111390@@ -1388,7 +1389,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
111391 if (sk->sk_type != SOCK_STREAM)
111392 return -ENOPROTOOPT;
111393
111394- msg.msg_control = (__force void *) optval;
111395+ msg.msg_control = (__force_kernel void *) optval;
111396 msg.msg_controllen = len;
111397 msg.msg_flags = flags;
111398
111399diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
111400index 94efe14..1453fcc 100644
111401--- a/net/ipv4/ip_vti.c
111402+++ b/net/ipv4/ip_vti.c
111403@@ -45,7 +45,7 @@
111404 #include <net/net_namespace.h>
111405 #include <net/netns/generic.h>
111406
111407-static struct rtnl_link_ops vti_link_ops __read_mostly;
111408+static struct rtnl_link_ops vti_link_ops;
111409
111410 static int vti_net_id __read_mostly;
111411 static int vti_tunnel_init(struct net_device *dev);
111412@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
111413 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
111414 };
111415
111416-static struct rtnl_link_ops vti_link_ops __read_mostly = {
111417+static struct rtnl_link_ops vti_link_ops = {
111418 .kind = "vti",
111419 .maxtype = IFLA_VTI_MAX,
111420 .policy = vti_policy,
111421diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
111422index b26376e..fc3d733 100644
111423--- a/net/ipv4/ipconfig.c
111424+++ b/net/ipv4/ipconfig.c
111425@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
111426
111427 mm_segment_t oldfs = get_fs();
111428 set_fs(get_ds());
111429- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
111430+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
111431 set_fs(oldfs);
111432 return res;
111433 }
111434@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
111435
111436 mm_segment_t oldfs = get_fs();
111437 set_fs(get_ds());
111438- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
111439+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
111440 set_fs(oldfs);
111441 return res;
111442 }
111443@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
111444
111445 mm_segment_t oldfs = get_fs();
111446 set_fs(get_ds());
111447- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
111448+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
111449 set_fs(oldfs);
111450 return res;
111451 }
111452diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
111453index 915d215..48d1db7 100644
111454--- a/net/ipv4/ipip.c
111455+++ b/net/ipv4/ipip.c
111456@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
111457 static int ipip_net_id __read_mostly;
111458
111459 static int ipip_tunnel_init(struct net_device *dev);
111460-static struct rtnl_link_ops ipip_link_ops __read_mostly;
111461+static struct rtnl_link_ops ipip_link_ops;
111462
111463 static int ipip_err(struct sk_buff *skb, u32 info)
111464 {
111465@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
111466 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
111467 };
111468
111469-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
111470+static struct rtnl_link_ops ipip_link_ops = {
111471 .kind = "ipip",
111472 .maxtype = IFLA_IPTUN_MAX,
111473 .policy = ipip_policy,
111474diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
111475index f95b6f9..2ee2097 100644
111476--- a/net/ipv4/netfilter/arp_tables.c
111477+++ b/net/ipv4/netfilter/arp_tables.c
111478@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
111479 #endif
111480
111481 static int get_info(struct net *net, void __user *user,
111482- const int *len, int compat)
111483+ int len, int compat)
111484 {
111485 char name[XT_TABLE_MAXNAMELEN];
111486 struct xt_table *t;
111487 int ret;
111488
111489- if (*len != sizeof(struct arpt_getinfo)) {
111490- duprintf("length %u != %Zu\n", *len,
111491+ if (len != sizeof(struct arpt_getinfo)) {
111492+ duprintf("length %u != %Zu\n", len,
111493 sizeof(struct arpt_getinfo));
111494 return -EINVAL;
111495 }
111496@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
111497 info.size = private->size;
111498 strcpy(info.name, name);
111499
111500- if (copy_to_user(user, &info, *len) != 0)
111501+ if (copy_to_user(user, &info, len) != 0)
111502 ret = -EFAULT;
111503 else
111504 ret = 0;
111505@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
111506
111507 switch (cmd) {
111508 case ARPT_SO_GET_INFO:
111509- ret = get_info(sock_net(sk), user, len, 1);
111510+ ret = get_info(sock_net(sk), user, *len, 1);
111511 break;
111512 case ARPT_SO_GET_ENTRIES:
111513 ret = compat_get_entries(sock_net(sk), user, len);
111514@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
111515
111516 switch (cmd) {
111517 case ARPT_SO_GET_INFO:
111518- ret = get_info(sock_net(sk), user, len, 0);
111519+ ret = get_info(sock_net(sk), user, *len, 0);
111520 break;
111521
111522 case ARPT_SO_GET_ENTRIES:
111523diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
111524index cf5e82f..75a20f5 100644
111525--- a/net/ipv4/netfilter/ip_tables.c
111526+++ b/net/ipv4/netfilter/ip_tables.c
111527@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
111528 #endif
111529
111530 static int get_info(struct net *net, void __user *user,
111531- const int *len, int compat)
111532+ int len, int compat)
111533 {
111534 char name[XT_TABLE_MAXNAMELEN];
111535 struct xt_table *t;
111536 int ret;
111537
111538- if (*len != sizeof(struct ipt_getinfo)) {
111539- duprintf("length %u != %zu\n", *len,
111540+ if (len != sizeof(struct ipt_getinfo)) {
111541+ duprintf("length %u != %zu\n", len,
111542 sizeof(struct ipt_getinfo));
111543 return -EINVAL;
111544 }
111545@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
111546 info.size = private->size;
111547 strcpy(info.name, name);
111548
111549- if (copy_to_user(user, &info, *len) != 0)
111550+ if (copy_to_user(user, &info, len) != 0)
111551 ret = -EFAULT;
111552 else
111553 ret = 0;
111554@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
111555
111556 switch (cmd) {
111557 case IPT_SO_GET_INFO:
111558- ret = get_info(sock_net(sk), user, len, 1);
111559+ ret = get_info(sock_net(sk), user, *len, 1);
111560 break;
111561 case IPT_SO_GET_ENTRIES:
111562 ret = compat_get_entries(sock_net(sk), user, len);
111563@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
111564
111565 switch (cmd) {
111566 case IPT_SO_GET_INFO:
111567- ret = get_info(sock_net(sk), user, len, 0);
111568+ ret = get_info(sock_net(sk), user, *len, 0);
111569 break;
111570
111571 case IPT_SO_GET_ENTRIES:
111572diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
111573index e90f83a..3e6acca 100644
111574--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
111575+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
111576@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
111577 spin_lock_init(&cn->lock);
111578
111579 #ifdef CONFIG_PROC_FS
111580- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
111581+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
111582 if (!cn->procdir) {
111583 pr_err("Unable to proc dir entry\n");
111584 return -ENOMEM;
111585diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
111586index 787b0d6..ab6c0ba 100644
111587--- a/net/ipv4/ping.c
111588+++ b/net/ipv4/ping.c
111589@@ -59,7 +59,7 @@ struct ping_table {
111590 };
111591
111592 static struct ping_table ping_table;
111593-struct pingv6_ops pingv6_ops;
111594+struct pingv6_ops *pingv6_ops;
111595 EXPORT_SYMBOL_GPL(pingv6_ops);
111596
111597 static u16 ping_port_rover;
111598@@ -359,7 +359,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
111599 return -ENODEV;
111600 }
111601 }
111602- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
111603+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
111604 scoped);
111605 rcu_read_unlock();
111606
111607@@ -567,7 +567,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
111608 }
111609 #if IS_ENABLED(CONFIG_IPV6)
111610 } else if (skb->protocol == htons(ETH_P_IPV6)) {
111611- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
111612+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
111613 #endif
111614 }
111615
111616@@ -585,7 +585,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
111617 info, (u8 *)icmph);
111618 #if IS_ENABLED(CONFIG_IPV6)
111619 } else if (family == AF_INET6) {
111620- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
111621+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
111622 info, (u8 *)icmph);
111623 #endif
111624 }
111625@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
111626 }
111627
111628 if (inet6_sk(sk)->rxopt.all)
111629- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
111630+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
111631 if (skb->protocol == htons(ETH_P_IPV6) &&
111632 inet6_sk(sk)->rxopt.all)
111633- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
111634+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
111635 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
111636 ip_cmsg_recv(msg, skb);
111637 #endif
111638@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
111639 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
111640 0, sock_i_ino(sp),
111641 atomic_read(&sp->sk_refcnt), sp,
111642- atomic_read(&sp->sk_drops));
111643+ atomic_read_unchecked(&sp->sk_drops));
111644 }
111645
111646 static int ping_v4_seq_show(struct seq_file *seq, void *v)
111647diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
111648index f027a70..2e64edc 100644
111649--- a/net/ipv4/raw.c
111650+++ b/net/ipv4/raw.c
111651@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
111652 int raw_rcv(struct sock *sk, struct sk_buff *skb)
111653 {
111654 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
111655- atomic_inc(&sk->sk_drops);
111656+ atomic_inc_unchecked(&sk->sk_drops);
111657 kfree_skb(skb);
111658 return NET_RX_DROP;
111659 }
111660@@ -773,16 +773,20 @@ static int raw_init(struct sock *sk)
111661
111662 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
111663 {
111664+ struct icmp_filter filter;
111665+
111666 if (optlen > sizeof(struct icmp_filter))
111667 optlen = sizeof(struct icmp_filter);
111668- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
111669+ if (copy_from_user(&filter, optval, optlen))
111670 return -EFAULT;
111671+ raw_sk(sk)->filter = filter;
111672 return 0;
111673 }
111674
111675 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
111676 {
111677 int len, ret = -EFAULT;
111678+ struct icmp_filter filter;
111679
111680 if (get_user(len, optlen))
111681 goto out;
111682@@ -792,8 +796,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
111683 if (len > sizeof(struct icmp_filter))
111684 len = sizeof(struct icmp_filter);
111685 ret = -EFAULT;
111686- if (put_user(len, optlen) ||
111687- copy_to_user(optval, &raw_sk(sk)->filter, len))
111688+ filter = raw_sk(sk)->filter;
111689+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
111690 goto out;
111691 ret = 0;
111692 out: return ret;
111693@@ -1022,7 +1026,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
111694 0, 0L, 0,
111695 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
111696 0, sock_i_ino(sp),
111697- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
111698+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
111699 }
111700
111701 static int raw_seq_show(struct seq_file *seq, void *v)
111702diff --git a/net/ipv4/route.c b/net/ipv4/route.c
111703index 20fc020..3ba426f 100644
111704--- a/net/ipv4/route.c
111705+++ b/net/ipv4/route.c
111706@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
111707
111708 static int rt_cache_seq_open(struct inode *inode, struct file *file)
111709 {
111710- return seq_open(file, &rt_cache_seq_ops);
111711+ return seq_open_restrict(file, &rt_cache_seq_ops);
111712 }
111713
111714 static const struct file_operations rt_cache_seq_fops = {
111715@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
111716
111717 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
111718 {
111719- return seq_open(file, &rt_cpu_seq_ops);
111720+ return seq_open_restrict(file, &rt_cpu_seq_ops);
111721 }
111722
111723 static const struct file_operations rt_cpu_seq_fops = {
111724@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
111725
111726 static int rt_acct_proc_open(struct inode *inode, struct file *file)
111727 {
111728- return single_open(file, rt_acct_proc_show, NULL);
111729+ return single_open_restrict(file, rt_acct_proc_show, NULL);
111730 }
111731
111732 static const struct file_operations rt_acct_proc_fops = {
111733@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
111734
111735 #define IP_IDENTS_SZ 2048u
111736 struct ip_ident_bucket {
111737- atomic_t id;
111738+ atomic_unchecked_t id;
111739 u32 stamp32;
111740 };
111741
111742-static struct ip_ident_bucket *ip_idents __read_mostly;
111743+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
111744
111745 /* In order to protect privacy, we add a perturbation to identifiers
111746 * if one generator is seldom used. This makes hard for an attacker
111747@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
111748 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
111749 delta = prandom_u32_max(now - old);
111750
111751- return atomic_add_return(segs + delta, &bucket->id) - segs;
111752+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
111753 }
111754 EXPORT_SYMBOL(ip_idents_reserve);
111755
111756@@ -2639,34 +2639,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
111757 .maxlen = sizeof(int),
111758 .mode = 0200,
111759 .proc_handler = ipv4_sysctl_rtcache_flush,
111760+ .extra1 = &init_net,
111761 },
111762 { },
111763 };
111764
111765 static __net_init int sysctl_route_net_init(struct net *net)
111766 {
111767- struct ctl_table *tbl;
111768+ ctl_table_no_const *tbl = NULL;
111769
111770- tbl = ipv4_route_flush_table;
111771 if (!net_eq(net, &init_net)) {
111772- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
111773+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
111774 if (tbl == NULL)
111775 goto err_dup;
111776
111777 /* Don't export sysctls to unprivileged users */
111778 if (net->user_ns != &init_user_ns)
111779 tbl[0].procname = NULL;
111780- }
111781- tbl[0].extra1 = net;
111782+ tbl[0].extra1 = net;
111783+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
111784+ } else
111785+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
111786
111787- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
111788 if (net->ipv4.route_hdr == NULL)
111789 goto err_reg;
111790 return 0;
111791
111792 err_reg:
111793- if (tbl != ipv4_route_flush_table)
111794- kfree(tbl);
111795+ kfree(tbl);
111796 err_dup:
111797 return -ENOMEM;
111798 }
111799@@ -2689,8 +2689,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
111800
111801 static __net_init int rt_genid_init(struct net *net)
111802 {
111803- atomic_set(&net->ipv4.rt_genid, 0);
111804- atomic_set(&net->fnhe_genid, 0);
111805+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
111806+ atomic_set_unchecked(&net->fnhe_genid, 0);
111807 get_random_bytes(&net->ipv4.dev_addr_genid,
111808 sizeof(net->ipv4.dev_addr_genid));
111809 return 0;
111810@@ -2734,11 +2734,7 @@ int __init ip_rt_init(void)
111811 int rc = 0;
111812 int cpu;
111813
111814- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
111815- if (!ip_idents)
111816- panic("IP: failed to allocate ip_idents\n");
111817-
111818- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
111819+ prandom_bytes(ip_idents, sizeof(ip_idents));
111820
111821 for_each_possible_cpu(cpu) {
111822 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
111823diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
111824index d151539..5f5e247 100644
111825--- a/net/ipv4/sysctl_net_ipv4.c
111826+++ b/net/ipv4/sysctl_net_ipv4.c
111827@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
111828 container_of(table->data, struct net, ipv4.ip_local_ports.range);
111829 int ret;
111830 int range[2];
111831- struct ctl_table tmp = {
111832+ ctl_table_no_const tmp = {
111833 .data = &range,
111834 .maxlen = sizeof(range),
111835 .mode = table->mode,
111836@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
111837 int ret;
111838 gid_t urange[2];
111839 kgid_t low, high;
111840- struct ctl_table tmp = {
111841+ ctl_table_no_const tmp = {
111842 .data = &urange,
111843 .maxlen = sizeof(urange),
111844 .mode = table->mode,
111845@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
111846 void __user *buffer, size_t *lenp, loff_t *ppos)
111847 {
111848 char val[TCP_CA_NAME_MAX];
111849- struct ctl_table tbl = {
111850+ ctl_table_no_const tbl = {
111851 .data = val,
111852 .maxlen = TCP_CA_NAME_MAX,
111853 };
111854@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
111855 void __user *buffer, size_t *lenp,
111856 loff_t *ppos)
111857 {
111858- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
111859+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
111860 int ret;
111861
111862 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
111863@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
111864 void __user *buffer, size_t *lenp,
111865 loff_t *ppos)
111866 {
111867- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
111868+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
111869 int ret;
111870
111871 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
111872@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
111873 void __user *buffer, size_t *lenp,
111874 loff_t *ppos)
111875 {
111876- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
111877+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
111878 struct tcp_fastopen_context *ctxt;
111879 int ret;
111880 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
111881@@ -888,13 +888,12 @@ static struct ctl_table ipv4_net_table[] = {
111882
111883 static __net_init int ipv4_sysctl_init_net(struct net *net)
111884 {
111885- struct ctl_table *table;
111886+ ctl_table_no_const *table = NULL;
111887
111888- table = ipv4_net_table;
111889 if (!net_eq(net, &init_net)) {
111890 int i;
111891
111892- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
111893+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
111894 if (table == NULL)
111895 goto err_alloc;
111896
111897@@ -903,7 +902,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
111898 table[i].data += (void *)net - (void *)&init_net;
111899 }
111900
111901- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
111902+ if (!net_eq(net, &init_net))
111903+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
111904+ else
111905+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
111906 if (net->ipv4.ipv4_hdr == NULL)
111907 goto err_reg;
111908
111909diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
111910index 995a225..e1e9183 100644
111911--- a/net/ipv4/tcp.c
111912+++ b/net/ipv4/tcp.c
111913@@ -520,8 +520,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
111914
111915 /* Race breaker. If space is freed after
111916 * wspace test but before the flags are set,
111917- * IO signal will be lost.
111918+ * IO signal will be lost. Memory barrier
111919+ * pairs with the input side.
111920 */
111921+ smp_mb__after_atomic();
111922 if (sk_stream_is_writeable(sk))
111923 mask |= POLLOUT | POLLWRNORM;
111924 }
111925diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
111926index f501ac04..0c5a1b2 100644
111927--- a/net/ipv4/tcp_input.c
111928+++ b/net/ipv4/tcp_input.c
111929@@ -767,7 +767,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
111930 * without any lock. We want to make sure compiler wont store
111931 * intermediate values in this location.
111932 */
111933- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
111934+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
111935 sk->sk_max_pacing_rate);
111936 }
111937
111938@@ -4541,7 +4541,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
111939 * simplifies code)
111940 */
111941 static void
111942-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
111943+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
111944 struct sk_buff *head, struct sk_buff *tail,
111945 u32 start, u32 end)
111946 {
111947@@ -4799,6 +4799,8 @@ static void tcp_check_space(struct sock *sk)
111948 {
111949 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
111950 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
111951+ /* pairs with tcp_poll() */
111952+ smp_mb__after_atomic();
111953 if (sk->sk_socket &&
111954 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
111955 tcp_new_space(sk);
111956@@ -5525,6 +5527,7 @@ discard:
111957 tcp_paws_reject(&tp->rx_opt, 0))
111958 goto discard_and_undo;
111959
111960+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
111961 if (th->syn) {
111962 /* We see SYN without ACK. It is attempt of
111963 * simultaneous connect with crossed SYNs.
111964@@ -5575,6 +5578,7 @@ discard:
111965 goto discard;
111966 #endif
111967 }
111968+#endif
111969 /* "fifth, if neither of the SYN or RST bits is set then
111970 * drop the segment and return."
111971 */
111972@@ -5621,7 +5625,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
111973 goto discard;
111974
111975 if (th->syn) {
111976- if (th->fin)
111977+ if (th->fin || th->urg || th->psh)
111978 goto discard;
111979 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
111980 return 1;
111981diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
111982index f1756ee..8908cb0 100644
111983--- a/net/ipv4/tcp_ipv4.c
111984+++ b/net/ipv4/tcp_ipv4.c
111985@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
111986 int sysctl_tcp_low_latency __read_mostly;
111987 EXPORT_SYMBOL(sysctl_tcp_low_latency);
111988
111989+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111990+extern int grsec_enable_blackhole;
111991+#endif
111992+
111993 #ifdef CONFIG_TCP_MD5SIG
111994 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
111995 __be32 daddr, __be32 saddr, const struct tcphdr *th);
111996@@ -1475,6 +1479,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
111997 return 0;
111998
111999 reset:
112000+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112001+ if (!grsec_enable_blackhole)
112002+#endif
112003 tcp_v4_send_reset(rsk, skb);
112004 discard:
112005 kfree_skb(skb);
112006@@ -1639,12 +1646,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
112007 TCP_SKB_CB(skb)->sacked = 0;
112008
112009 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
112010- if (!sk)
112011+ if (!sk) {
112012+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112013+ ret = 1;
112014+#endif
112015 goto no_tcp_socket;
112016-
112017+ }
112018 process:
112019- if (sk->sk_state == TCP_TIME_WAIT)
112020+ if (sk->sk_state == TCP_TIME_WAIT) {
112021+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112022+ ret = 2;
112023+#endif
112024 goto do_time_wait;
112025+ }
112026
112027 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
112028 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
112029@@ -1700,6 +1714,10 @@ csum_error:
112030 bad_packet:
112031 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
112032 } else {
112033+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112034+ if (!grsec_enable_blackhole || (ret == 1 &&
112035+ (skb->dev->flags & IFF_LOOPBACK)))
112036+#endif
112037 tcp_v4_send_reset(NULL, skb);
112038 }
112039
112040diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
112041index dd11ac7..c0872da 100644
112042--- a/net/ipv4/tcp_minisocks.c
112043+++ b/net/ipv4/tcp_minisocks.c
112044@@ -27,6 +27,10 @@
112045 #include <net/inet_common.h>
112046 #include <net/xfrm.h>
112047
112048+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112049+extern int grsec_enable_blackhole;
112050+#endif
112051+
112052 int sysctl_tcp_syncookies __read_mostly = 1;
112053 EXPORT_SYMBOL(sysctl_tcp_syncookies);
112054
112055@@ -785,7 +789,10 @@ embryonic_reset:
112056 * avoid becoming vulnerable to outside attack aiming at
112057 * resetting legit local connections.
112058 */
112059- req->rsk_ops->send_reset(sk, skb);
112060+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112061+ if (!grsec_enable_blackhole)
112062+#endif
112063+ req->rsk_ops->send_reset(sk, skb);
112064 } else if (fastopen) { /* received a valid RST pkt */
112065 reqsk_fastopen_remove(sk, req, true);
112066 tcp_reset(sk);
112067diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
112068index ebf5ff5..4d1ff32 100644
112069--- a/net/ipv4/tcp_probe.c
112070+++ b/net/ipv4/tcp_probe.c
112071@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
112072 if (cnt + width >= len)
112073 break;
112074
112075- if (copy_to_user(buf + cnt, tbuf, width))
112076+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
112077 return -EFAULT;
112078 cnt += width;
112079 }
112080diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
112081index 0732b78..a82bdc6 100644
112082--- a/net/ipv4/tcp_timer.c
112083+++ b/net/ipv4/tcp_timer.c
112084@@ -22,6 +22,10 @@
112085 #include <linux/gfp.h>
112086 #include <net/tcp.h>
112087
112088+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112089+extern int grsec_lastack_retries;
112090+#endif
112091+
112092 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
112093 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
112094 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
112095@@ -194,6 +198,13 @@ static int tcp_write_timeout(struct sock *sk)
112096 }
112097 }
112098
112099+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112100+ if ((sk->sk_state == TCP_LAST_ACK) &&
112101+ (grsec_lastack_retries > 0) &&
112102+ (grsec_lastack_retries < retry_until))
112103+ retry_until = grsec_lastack_retries;
112104+#endif
112105+
112106 if (retransmits_timed_out(sk, retry_until,
112107 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
112108 /* Has it gone just too far? */
112109diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
112110index 97ef1f8b..abeb965 100644
112111--- a/net/ipv4/udp.c
112112+++ b/net/ipv4/udp.c
112113@@ -87,6 +87,7 @@
112114 #include <linux/types.h>
112115 #include <linux/fcntl.h>
112116 #include <linux/module.h>
112117+#include <linux/security.h>
112118 #include <linux/socket.h>
112119 #include <linux/sockios.h>
112120 #include <linux/igmp.h>
112121@@ -114,6 +115,10 @@
112122 #include <net/busy_poll.h>
112123 #include "udp_impl.h"
112124
112125+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112126+extern int grsec_enable_blackhole;
112127+#endif
112128+
112129 struct udp_table udp_table __read_mostly;
112130 EXPORT_SYMBOL(udp_table);
112131
112132@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
112133 return true;
112134 }
112135
112136+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
112137+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
112138+
112139 /*
112140 * This routine is called by the ICMP module when it gets some
112141 * sort of error condition. If err < 0 then the socket should
112142@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
112143 dport = usin->sin_port;
112144 if (dport == 0)
112145 return -EINVAL;
112146+
112147+ err = gr_search_udp_sendmsg(sk, usin);
112148+ if (err)
112149+ return err;
112150 } else {
112151 if (sk->sk_state != TCP_ESTABLISHED)
112152 return -EDESTADDRREQ;
112153+
112154+ err = gr_search_udp_sendmsg(sk, NULL);
112155+ if (err)
112156+ return err;
112157+
112158 daddr = inet->inet_daddr;
112159 dport = inet->inet_dport;
112160 /* Open fast path for connected socket.
112161@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
112162 IS_UDPLITE(sk));
112163 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
112164 IS_UDPLITE(sk));
112165- atomic_inc(&sk->sk_drops);
112166+ atomic_inc_unchecked(&sk->sk_drops);
112167 __skb_unlink(skb, rcvq);
112168 __skb_queue_tail(&list_kill, skb);
112169 }
112170@@ -1275,6 +1292,10 @@ try_again:
112171 if (!skb)
112172 goto out;
112173
112174+ err = gr_search_udp_recvmsg(sk, skb);
112175+ if (err)
112176+ goto out_free;
112177+
112178 ulen = skb->len - sizeof(struct udphdr);
112179 copied = len;
112180 if (copied > ulen)
112181@@ -1307,7 +1328,7 @@ try_again:
112182 if (unlikely(err)) {
112183 trace_kfree_skb(skb, udp_recvmsg);
112184 if (!peeked) {
112185- atomic_inc(&sk->sk_drops);
112186+ atomic_inc_unchecked(&sk->sk_drops);
112187 UDP_INC_STATS_USER(sock_net(sk),
112188 UDP_MIB_INERRORS, is_udplite);
112189 }
112190@@ -1348,10 +1369,8 @@ csum_copy_err:
112191 }
112192 unlock_sock_fast(sk, slow);
112193
112194- if (noblock)
112195- return -EAGAIN;
112196-
112197- /* starting over for a new packet */
112198+ /* starting over for a new packet, but check if we need to yield */
112199+ cond_resched();
112200 msg->msg_flags &= ~MSG_TRUNC;
112201 goto try_again;
112202 }
112203@@ -1605,7 +1624,7 @@ csum_error:
112204 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
112205 drop:
112206 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
112207- atomic_inc(&sk->sk_drops);
112208+ atomic_inc_unchecked(&sk->sk_drops);
112209 kfree_skb(skb);
112210 return -1;
112211 }
112212@@ -1624,7 +1643,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
112213 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
112214
112215 if (!skb1) {
112216- atomic_inc(&sk->sk_drops);
112217+ atomic_inc_unchecked(&sk->sk_drops);
112218 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
112219 IS_UDPLITE(sk));
112220 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
112221@@ -1830,6 +1849,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
112222 goto csum_error;
112223
112224 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
112225+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112226+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
112227+#endif
112228 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
112229
112230 /*
112231@@ -2416,7 +2438,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
112232 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
112233 0, sock_i_ino(sp),
112234 atomic_read(&sp->sk_refcnt), sp,
112235- atomic_read(&sp->sk_drops));
112236+ atomic_read_unchecked(&sp->sk_drops));
112237 }
112238
112239 int udp4_seq_show(struct seq_file *seq, void *v)
112240diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
112241index 6156f68..d6ab46d 100644
112242--- a/net/ipv4/xfrm4_policy.c
112243+++ b/net/ipv4/xfrm4_policy.c
112244@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
112245 fl4->flowi4_tos = iph->tos;
112246 }
112247
112248-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
112249+static int xfrm4_garbage_collect(struct dst_ops *ops)
112250 {
112251 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
112252
112253- xfrm4_policy_afinfo.garbage_collect(net);
112254+ xfrm_garbage_collect_deferred(net);
112255 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
112256 }
112257
112258@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
112259
112260 static int __net_init xfrm4_net_init(struct net *net)
112261 {
112262- struct ctl_table *table;
112263+ ctl_table_no_const *table = NULL;
112264 struct ctl_table_header *hdr;
112265
112266- table = xfrm4_policy_table;
112267 if (!net_eq(net, &init_net)) {
112268- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
112269+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
112270 if (!table)
112271 goto err_alloc;
112272
112273 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
112274- }
112275-
112276- hdr = register_net_sysctl(net, "net/ipv4", table);
112277+ hdr = register_net_sysctl(net, "net/ipv4", table);
112278+ } else
112279+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
112280 if (!hdr)
112281 goto err_reg;
112282
112283@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
112284 return 0;
112285
112286 err_reg:
112287- if (!net_eq(net, &init_net))
112288- kfree(table);
112289+ kfree(table);
112290 err_alloc:
112291 return -ENOMEM;
112292 }
112293diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
112294index b603002..0de5c88 100644
112295--- a/net/ipv6/addrconf.c
112296+++ b/net/ipv6/addrconf.c
112297@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
112298 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
112299 .mtu6 = IPV6_MIN_MTU,
112300 .accept_ra = 1,
112301- .accept_redirects = 1,
112302+ .accept_redirects = 0,
112303 .autoconf = 1,
112304 .force_mld_version = 0,
112305 .mldv1_unsolicited_report_interval = 10 * HZ,
112306@@ -209,7 +209,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
112307 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
112308 .mtu6 = IPV6_MIN_MTU,
112309 .accept_ra = 1,
112310- .accept_redirects = 1,
112311+ .accept_redirects = 0,
112312 .autoconf = 1,
112313 .force_mld_version = 0,
112314 .mldv1_unsolicited_report_interval = 10 * HZ,
112315@@ -607,7 +607,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
112316 idx = 0;
112317 head = &net->dev_index_head[h];
112318 rcu_read_lock();
112319- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
112320+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
112321 net->dev_base_seq;
112322 hlist_for_each_entry_rcu(dev, head, index_hlist) {
112323 if (idx < s_idx)
112324@@ -2438,7 +2438,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
112325 p.iph.ihl = 5;
112326 p.iph.protocol = IPPROTO_IPV6;
112327 p.iph.ttl = 64;
112328- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
112329+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
112330
112331 if (ops->ndo_do_ioctl) {
112332 mm_segment_t oldfs = get_fs();
112333@@ -3587,16 +3587,23 @@ static const struct file_operations if6_fops = {
112334 .release = seq_release_net,
112335 };
112336
112337+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
112338+extern void unregister_ipv6_seq_ops_addr(void);
112339+
112340 static int __net_init if6_proc_net_init(struct net *net)
112341 {
112342- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
112343+ register_ipv6_seq_ops_addr(&if6_seq_ops);
112344+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
112345+ unregister_ipv6_seq_ops_addr();
112346 return -ENOMEM;
112347+ }
112348 return 0;
112349 }
112350
112351 static void __net_exit if6_proc_net_exit(struct net *net)
112352 {
112353 remove_proc_entry("if_inet6", net->proc_net);
112354+ unregister_ipv6_seq_ops_addr();
112355 }
112356
112357 static struct pernet_operations if6_proc_net_ops = {
112358@@ -4215,7 +4222,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
112359 s_ip_idx = ip_idx = cb->args[2];
112360
112361 rcu_read_lock();
112362- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
112363+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
112364 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
112365 idx = 0;
112366 head = &net->dev_index_head[h];
112367@@ -4864,7 +4871,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
112368 rt_genid_bump_ipv6(net);
112369 break;
112370 }
112371- atomic_inc(&net->ipv6.dev_addr_genid);
112372+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
112373 }
112374
112375 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
112376@@ -4884,7 +4891,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
112377 int *valp = ctl->data;
112378 int val = *valp;
112379 loff_t pos = *ppos;
112380- struct ctl_table lctl;
112381+ ctl_table_no_const lctl;
112382 int ret;
112383
112384 /*
112385@@ -4909,7 +4916,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
112386 {
112387 struct inet6_dev *idev = ctl->extra1;
112388 int min_mtu = IPV6_MIN_MTU;
112389- struct ctl_table lctl;
112390+ ctl_table_no_const lctl;
112391
112392 lctl = *ctl;
112393 lctl.extra1 = &min_mtu;
112394@@ -4984,7 +4991,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
112395 int *valp = ctl->data;
112396 int val = *valp;
112397 loff_t pos = *ppos;
112398- struct ctl_table lctl;
112399+ ctl_table_no_const lctl;
112400 int ret;
112401
112402 /*
112403diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
112404index e8c4400..a4cd5da 100644
112405--- a/net/ipv6/af_inet6.c
112406+++ b/net/ipv6/af_inet6.c
112407@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
112408 net->ipv6.sysctl.icmpv6_time = 1*HZ;
112409 net->ipv6.sysctl.flowlabel_consistency = 1;
112410 net->ipv6.sysctl.auto_flowlabels = 0;
112411- atomic_set(&net->ipv6.fib6_sernum, 1);
112412+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
112413
112414 err = ipv6_init_mibs(net);
112415 if (err)
112416diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
112417index ace8dac..bd6942d 100644
112418--- a/net/ipv6/datagram.c
112419+++ b/net/ipv6/datagram.c
112420@@ -957,5 +957,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
112421 0,
112422 sock_i_ino(sp),
112423 atomic_read(&sp->sk_refcnt), sp,
112424- atomic_read(&sp->sk_drops));
112425+ atomic_read_unchecked(&sp->sk_drops));
112426 }
112427diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
112428index a5e9519..16b7412 100644
112429--- a/net/ipv6/icmp.c
112430+++ b/net/ipv6/icmp.c
112431@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
112432
112433 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
112434 {
112435- struct ctl_table *table;
112436+ ctl_table_no_const *table;
112437
112438 table = kmemdup(ipv6_icmp_table_template,
112439 sizeof(ipv6_icmp_table_template),
112440diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
112441index 263ef41..88c7be8 100644
112442--- a/net/ipv6/ip6_fib.c
112443+++ b/net/ipv6/ip6_fib.c
112444@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
112445 int new, old;
112446
112447 do {
112448- old = atomic_read(&net->ipv6.fib6_sernum);
112449+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
112450 new = old < INT_MAX ? old + 1 : 1;
112451- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
112452+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
112453 old, new) != old);
112454 return new;
112455 }
112456diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
112457index bc28b7d..a08feea 100644
112458--- a/net/ipv6/ip6_gre.c
112459+++ b/net/ipv6/ip6_gre.c
112460@@ -71,8 +71,8 @@ struct ip6gre_net {
112461 struct net_device *fb_tunnel_dev;
112462 };
112463
112464-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
112465-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
112466+static struct rtnl_link_ops ip6gre_link_ops;
112467+static struct rtnl_link_ops ip6gre_tap_ops;
112468 static int ip6gre_tunnel_init(struct net_device *dev);
112469 static void ip6gre_tunnel_setup(struct net_device *dev);
112470 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
112471@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
112472 }
112473
112474
112475-static struct inet6_protocol ip6gre_protocol __read_mostly = {
112476+static struct inet6_protocol ip6gre_protocol = {
112477 .handler = ip6gre_rcv,
112478 .err_handler = ip6gre_err,
112479 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
112480@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
112481 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
112482 };
112483
112484-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
112485+static struct rtnl_link_ops ip6gre_link_ops = {
112486 .kind = "ip6gre",
112487 .maxtype = IFLA_GRE_MAX,
112488 .policy = ip6gre_policy,
112489@@ -1665,7 +1665,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
112490 .get_link_net = ip6_tnl_get_link_net,
112491 };
112492
112493-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
112494+static struct rtnl_link_ops ip6gre_tap_ops = {
112495 .kind = "ip6gretap",
112496 .maxtype = IFLA_GRE_MAX,
112497 .policy = ip6gre_policy,
112498diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
112499index ddd94ec..b7cfefb 100644
112500--- a/net/ipv6/ip6_tunnel.c
112501+++ b/net/ipv6/ip6_tunnel.c
112502@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
112503
112504 static int ip6_tnl_dev_init(struct net_device *dev);
112505 static void ip6_tnl_dev_setup(struct net_device *dev);
112506-static struct rtnl_link_ops ip6_link_ops __read_mostly;
112507+static struct rtnl_link_ops ip6_link_ops;
112508
112509 static int ip6_tnl_net_id __read_mostly;
112510 struct ip6_tnl_net {
112511@@ -1780,7 +1780,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
112512 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
112513 };
112514
112515-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
112516+static struct rtnl_link_ops ip6_link_ops = {
112517 .kind = "ip6tnl",
112518 .maxtype = IFLA_IPTUN_MAX,
112519 .policy = ip6_tnl_policy,
112520diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
112521index 5fb9e21..92bf04b 100644
112522--- a/net/ipv6/ip6_vti.c
112523+++ b/net/ipv6/ip6_vti.c
112524@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
112525
112526 static int vti6_dev_init(struct net_device *dev);
112527 static void vti6_dev_setup(struct net_device *dev);
112528-static struct rtnl_link_ops vti6_link_ops __read_mostly;
112529+static struct rtnl_link_ops vti6_link_ops;
112530
112531 static int vti6_net_id __read_mostly;
112532 struct vti6_net {
112533@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
112534 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
112535 };
112536
112537-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
112538+static struct rtnl_link_ops vti6_link_ops = {
112539 .kind = "vti6",
112540 .maxtype = IFLA_VTI_MAX,
112541 .policy = vti6_policy,
112542diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
112543index 8d766d9..dcdfea7 100644
112544--- a/net/ipv6/ipv6_sockglue.c
112545+++ b/net/ipv6/ipv6_sockglue.c
112546@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
112547 if (sk->sk_type != SOCK_STREAM)
112548 return -ENOPROTOOPT;
112549
112550- msg.msg_control = optval;
112551+ msg.msg_control = (void __force_kernel *)optval;
112552 msg.msg_controllen = len;
112553 msg.msg_flags = flags;
112554
112555diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
112556index bb00c6f..16c90d7 100644
112557--- a/net/ipv6/netfilter/ip6_tables.c
112558+++ b/net/ipv6/netfilter/ip6_tables.c
112559@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
112560 #endif
112561
112562 static int get_info(struct net *net, void __user *user,
112563- const int *len, int compat)
112564+ int len, int compat)
112565 {
112566 char name[XT_TABLE_MAXNAMELEN];
112567 struct xt_table *t;
112568 int ret;
112569
112570- if (*len != sizeof(struct ip6t_getinfo)) {
112571- duprintf("length %u != %zu\n", *len,
112572+ if (len != sizeof(struct ip6t_getinfo)) {
112573+ duprintf("length %u != %zu\n", len,
112574 sizeof(struct ip6t_getinfo));
112575 return -EINVAL;
112576 }
112577@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
112578 info.size = private->size;
112579 strcpy(info.name, name);
112580
112581- if (copy_to_user(user, &info, *len) != 0)
112582+ if (copy_to_user(user, &info, len) != 0)
112583 ret = -EFAULT;
112584 else
112585 ret = 0;
112586@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
112587
112588 switch (cmd) {
112589 case IP6T_SO_GET_INFO:
112590- ret = get_info(sock_net(sk), user, len, 1);
112591+ ret = get_info(sock_net(sk), user, *len, 1);
112592 break;
112593 case IP6T_SO_GET_ENTRIES:
112594 ret = compat_get_entries(sock_net(sk), user, len);
112595@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
112596
112597 switch (cmd) {
112598 case IP6T_SO_GET_INFO:
112599- ret = get_info(sock_net(sk), user, len, 0);
112600+ ret = get_info(sock_net(sk), user, *len, 0);
112601 break;
112602
112603 case IP6T_SO_GET_ENTRIES:
112604diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
112605index 6f187c8..34b367f 100644
112606--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
112607+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
112608@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
112609
112610 static int nf_ct_frag6_sysctl_register(struct net *net)
112611 {
112612- struct ctl_table *table;
112613+ ctl_table_no_const *table = NULL;
112614 struct ctl_table_header *hdr;
112615
112616- table = nf_ct_frag6_sysctl_table;
112617 if (!net_eq(net, &init_net)) {
112618- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
112619+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
112620 GFP_KERNEL);
112621 if (table == NULL)
112622 goto err_alloc;
112623@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
112624 table[2].data = &net->nf_frag.frags.high_thresh;
112625 table[2].extra1 = &net->nf_frag.frags.low_thresh;
112626 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
112627- }
112628-
112629- hdr = register_net_sysctl(net, "net/netfilter", table);
112630+ hdr = register_net_sysctl(net, "net/netfilter", table);
112631+ } else
112632+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
112633 if (hdr == NULL)
112634 goto err_reg;
112635
112636@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
112637 return 0;
112638
112639 err_reg:
112640- if (!net_eq(net, &init_net))
112641- kfree(table);
112642+ kfree(table);
112643 err_alloc:
112644 return -ENOMEM;
112645 }
112646diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
112647index a2dfff6..1e52e6d 100644
112648--- a/net/ipv6/ping.c
112649+++ b/net/ipv6/ping.c
112650@@ -241,6 +241,24 @@ static struct pernet_operations ping_v6_net_ops = {
112651 };
112652 #endif
112653
112654+static struct pingv6_ops real_pingv6_ops = {
112655+ .ipv6_recv_error = ipv6_recv_error,
112656+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
112657+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
112658+ .icmpv6_err_convert = icmpv6_err_convert,
112659+ .ipv6_icmp_error = ipv6_icmp_error,
112660+ .ipv6_chk_addr = ipv6_chk_addr,
112661+};
112662+
112663+static struct pingv6_ops dummy_pingv6_ops = {
112664+ .ipv6_recv_error = dummy_ipv6_recv_error,
112665+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
112666+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
112667+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
112668+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
112669+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
112670+};
112671+
112672 int __init pingv6_init(void)
112673 {
112674 #ifdef CONFIG_PROC_FS
112675@@ -248,13 +266,7 @@ int __init pingv6_init(void)
112676 if (ret)
112677 return ret;
112678 #endif
112679- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
112680- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
112681- pingv6_ops.ip6_datagram_recv_specific_ctl =
112682- ip6_datagram_recv_specific_ctl;
112683- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
112684- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
112685- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
112686+ pingv6_ops = &real_pingv6_ops;
112687 return inet6_register_protosw(&pingv6_protosw);
112688 }
112689
112690@@ -263,14 +275,9 @@ int __init pingv6_init(void)
112691 */
112692 void pingv6_exit(void)
112693 {
112694- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
112695- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
112696- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
112697- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
112698- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
112699- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
112700 #ifdef CONFIG_PROC_FS
112701 unregister_pernet_subsys(&ping_v6_net_ops);
112702 #endif
112703+ pingv6_ops = &dummy_pingv6_ops;
112704 inet6_unregister_protosw(&pingv6_protosw);
112705 }
112706diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
112707index 679253d0..70b653c 100644
112708--- a/net/ipv6/proc.c
112709+++ b/net/ipv6/proc.c
112710@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
112711 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
112712 goto proc_snmp6_fail;
112713
112714- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
112715+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
112716 if (!net->mib.proc_net_devsnmp6)
112717 goto proc_dev_snmp6_fail;
112718 return 0;
112719diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
112720index dae7f1a..783b20d 100644
112721--- a/net/ipv6/raw.c
112722+++ b/net/ipv6/raw.c
112723@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
112724 {
112725 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
112726 skb_checksum_complete(skb)) {
112727- atomic_inc(&sk->sk_drops);
112728+ atomic_inc_unchecked(&sk->sk_drops);
112729 kfree_skb(skb);
112730 return NET_RX_DROP;
112731 }
112732@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
112733 struct raw6_sock *rp = raw6_sk(sk);
112734
112735 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
112736- atomic_inc(&sk->sk_drops);
112737+ atomic_inc_unchecked(&sk->sk_drops);
112738 kfree_skb(skb);
112739 return NET_RX_DROP;
112740 }
112741@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
112742
112743 if (inet->hdrincl) {
112744 if (skb_checksum_complete(skb)) {
112745- atomic_inc(&sk->sk_drops);
112746+ atomic_inc_unchecked(&sk->sk_drops);
112747 kfree_skb(skb);
112748 return NET_RX_DROP;
112749 }
112750@@ -609,7 +609,7 @@ out:
112751 return err;
112752 }
112753
112754-static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
112755+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, unsigned int length,
112756 struct flowi6 *fl6, struct dst_entry **dstp,
112757 unsigned int flags)
112758 {
112759@@ -915,12 +915,15 @@ do_confirm:
112760 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
112761 char __user *optval, int optlen)
112762 {
112763+ struct icmp6_filter filter;
112764+
112765 switch (optname) {
112766 case ICMPV6_FILTER:
112767 if (optlen > sizeof(struct icmp6_filter))
112768 optlen = sizeof(struct icmp6_filter);
112769- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
112770+ if (copy_from_user(&filter, optval, optlen))
112771 return -EFAULT;
112772+ raw6_sk(sk)->filter = filter;
112773 return 0;
112774 default:
112775 return -ENOPROTOOPT;
112776@@ -933,6 +936,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
112777 char __user *optval, int __user *optlen)
112778 {
112779 int len;
112780+ struct icmp6_filter filter;
112781
112782 switch (optname) {
112783 case ICMPV6_FILTER:
112784@@ -944,7 +948,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
112785 len = sizeof(struct icmp6_filter);
112786 if (put_user(len, optlen))
112787 return -EFAULT;
112788- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
112789+ filter = raw6_sk(sk)->filter;
112790+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
112791 return -EFAULT;
112792 return 0;
112793 default:
112794diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
112795index d7d70e6..bd5e9fc 100644
112796--- a/net/ipv6/reassembly.c
112797+++ b/net/ipv6/reassembly.c
112798@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
112799
112800 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
112801 {
112802- struct ctl_table *table;
112803+ ctl_table_no_const *table = NULL;
112804 struct ctl_table_header *hdr;
112805
112806- table = ip6_frags_ns_ctl_table;
112807 if (!net_eq(net, &init_net)) {
112808- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
112809+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
112810 if (table == NULL)
112811 goto err_alloc;
112812
112813@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
112814 /* Don't export sysctls to unprivileged users */
112815 if (net->user_ns != &init_user_ns)
112816 table[0].procname = NULL;
112817- }
112818+ hdr = register_net_sysctl(net, "net/ipv6", table);
112819+ } else
112820+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
112821
112822- hdr = register_net_sysctl(net, "net/ipv6", table);
112823 if (hdr == NULL)
112824 goto err_reg;
112825
112826@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
112827 return 0;
112828
112829 err_reg:
112830- if (!net_eq(net, &init_net))
112831- kfree(table);
112832+ kfree(table);
112833 err_alloc:
112834 return -ENOMEM;
112835 }
112836diff --git a/net/ipv6/route.c b/net/ipv6/route.c
112837index 4688bd4..584453d 100644
112838--- a/net/ipv6/route.c
112839+++ b/net/ipv6/route.c
112840@@ -3029,7 +3029,7 @@ struct ctl_table ipv6_route_table_template[] = {
112841
112842 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
112843 {
112844- struct ctl_table *table;
112845+ ctl_table_no_const *table;
112846
112847 table = kmemdup(ipv6_route_table_template,
112848 sizeof(ipv6_route_table_template),
112849diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
112850index e4cbd57..02b1aaa 100644
112851--- a/net/ipv6/sit.c
112852+++ b/net/ipv6/sit.c
112853@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
112854 static void ipip6_dev_free(struct net_device *dev);
112855 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
112856 __be32 *v4dst);
112857-static struct rtnl_link_ops sit_link_ops __read_mostly;
112858+static struct rtnl_link_ops sit_link_ops;
112859
112860 static int sit_net_id __read_mostly;
112861 struct sit_net {
112862@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
112863 unregister_netdevice_queue(dev, head);
112864 }
112865
112866-static struct rtnl_link_ops sit_link_ops __read_mostly = {
112867+static struct rtnl_link_ops sit_link_ops = {
112868 .kind = "sit",
112869 .maxtype = IFLA_IPTUN_MAX,
112870 .policy = ipip6_policy,
112871diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
112872index c5c10fa..2577d51 100644
112873--- a/net/ipv6/sysctl_net_ipv6.c
112874+++ b/net/ipv6/sysctl_net_ipv6.c
112875@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
112876
112877 static int __net_init ipv6_sysctl_net_init(struct net *net)
112878 {
112879- struct ctl_table *ipv6_table;
112880+ ctl_table_no_const *ipv6_table;
112881 struct ctl_table *ipv6_route_table;
112882 struct ctl_table *ipv6_icmp_table;
112883 int err;
112884diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
112885index 1f5e622..8387d90 100644
112886--- a/net/ipv6/tcp_ipv6.c
112887+++ b/net/ipv6/tcp_ipv6.c
112888@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
112889 }
112890 }
112891
112892+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112893+extern int grsec_enable_blackhole;
112894+#endif
112895+
112896 static void tcp_v6_hash(struct sock *sk)
112897 {
112898 if (sk->sk_state != TCP_CLOSE) {
112899@@ -1345,6 +1349,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
112900 return 0;
112901
112902 reset:
112903+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112904+ if (!grsec_enable_blackhole)
112905+#endif
112906 tcp_v6_send_reset(sk, skb);
112907 discard:
112908 if (opt_skb)
112909@@ -1454,12 +1461,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
112910
112911 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
112912 inet6_iif(skb));
112913- if (!sk)
112914+ if (!sk) {
112915+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112916+ ret = 1;
112917+#endif
112918 goto no_tcp_socket;
112919+ }
112920
112921 process:
112922- if (sk->sk_state == TCP_TIME_WAIT)
112923+ if (sk->sk_state == TCP_TIME_WAIT) {
112924+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112925+ ret = 2;
112926+#endif
112927 goto do_time_wait;
112928+ }
112929
112930 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
112931 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
112932@@ -1510,6 +1525,10 @@ csum_error:
112933 bad_packet:
112934 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
112935 } else {
112936+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112937+ if (!grsec_enable_blackhole || (ret == 1 &&
112938+ (skb->dev->flags & IFF_LOOPBACK)))
112939+#endif
112940 tcp_v6_send_reset(NULL, skb);
112941 }
112942
112943diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
112944index d048d46..cacb4d2 100644
112945--- a/net/ipv6/udp.c
112946+++ b/net/ipv6/udp.c
112947@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
112948 udp_ipv6_hash_secret + net_hash_mix(net));
112949 }
112950
112951+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112952+extern int grsec_enable_blackhole;
112953+#endif
112954+
112955 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
112956 {
112957 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
112958@@ -448,7 +452,7 @@ try_again:
112959 if (unlikely(err)) {
112960 trace_kfree_skb(skb, udpv6_recvmsg);
112961 if (!peeked) {
112962- atomic_inc(&sk->sk_drops);
112963+ atomic_inc_unchecked(&sk->sk_drops);
112964 if (is_udp4)
112965 UDP_INC_STATS_USER(sock_net(sk),
112966 UDP_MIB_INERRORS,
112967@@ -528,10 +532,8 @@ csum_copy_err:
112968 }
112969 unlock_sock_fast(sk, slow);
112970
112971- if (noblock)
112972- return -EAGAIN;
112973-
112974- /* starting over for a new packet */
112975+ /* starting over for a new packet, but check if we need to yield */
112976+ cond_resched();
112977 msg->msg_flags &= ~MSG_TRUNC;
112978 goto try_again;
112979 }
112980@@ -714,7 +716,7 @@ csum_error:
112981 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
112982 drop:
112983 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
112984- atomic_inc(&sk->sk_drops);
112985+ atomic_inc_unchecked(&sk->sk_drops);
112986 kfree_skb(skb);
112987 return -1;
112988 }
112989@@ -753,7 +755,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
112990 if (likely(skb1 == NULL))
112991 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
112992 if (!skb1) {
112993- atomic_inc(&sk->sk_drops);
112994+ atomic_inc_unchecked(&sk->sk_drops);
112995 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
112996 IS_UDPLITE(sk));
112997 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
112998@@ -937,6 +939,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
112999 goto csum_error;
113000
113001 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
113002+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
113003+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
113004+#endif
113005 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
113006
113007 kfree_skb(skb);
113008diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
113009index 8d2d01b4..313511e 100644
113010--- a/net/ipv6/xfrm6_policy.c
113011+++ b/net/ipv6/xfrm6_policy.c
113012@@ -224,11 +224,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
113013 }
113014 }
113015
113016-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
113017+static int xfrm6_garbage_collect(struct dst_ops *ops)
113018 {
113019 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
113020
113021- xfrm6_policy_afinfo.garbage_collect(net);
113022+ xfrm_garbage_collect_deferred(net);
113023 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
113024 }
113025
113026@@ -341,19 +341,19 @@ static struct ctl_table xfrm6_policy_table[] = {
113027
113028 static int __net_init xfrm6_net_init(struct net *net)
113029 {
113030- struct ctl_table *table;
113031+ ctl_table_no_const *table = NULL;
113032 struct ctl_table_header *hdr;
113033
113034- table = xfrm6_policy_table;
113035 if (!net_eq(net, &init_net)) {
113036- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
113037+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
113038 if (!table)
113039 goto err_alloc;
113040
113041 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
113042- }
113043+ hdr = register_net_sysctl(net, "net/ipv6", table);
113044+ } else
113045+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
113046
113047- hdr = register_net_sysctl(net, "net/ipv6", table);
113048 if (!hdr)
113049 goto err_reg;
113050
113051@@ -361,8 +361,7 @@ static int __net_init xfrm6_net_init(struct net *net)
113052 return 0;
113053
113054 err_reg:
113055- if (!net_eq(net, &init_net))
113056- kfree(table);
113057+ kfree(table);
113058 err_alloc:
113059 return -ENOMEM;
113060 }
113061diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
113062index c1d247e..9e5949d 100644
113063--- a/net/ipx/ipx_proc.c
113064+++ b/net/ipx/ipx_proc.c
113065@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
113066 struct proc_dir_entry *p;
113067 int rc = -ENOMEM;
113068
113069- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
113070+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
113071
113072 if (!ipx_proc_dir)
113073 goto out;
113074diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
113075index 683346d..cb0e12d 100644
113076--- a/net/irda/ircomm/ircomm_tty.c
113077+++ b/net/irda/ircomm/ircomm_tty.c
113078@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
113079 add_wait_queue(&port->open_wait, &wait);
113080
113081 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
113082- __FILE__, __LINE__, tty->driver->name, port->count);
113083+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
113084
113085 spin_lock_irqsave(&port->lock, flags);
113086- port->count--;
113087+ atomic_dec(&port->count);
113088 port->blocked_open++;
113089 spin_unlock_irqrestore(&port->lock, flags);
113090
113091@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
113092 }
113093
113094 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
113095- __FILE__, __LINE__, tty->driver->name, port->count);
113096+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
113097
113098 schedule();
113099 }
113100@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
113101
113102 spin_lock_irqsave(&port->lock, flags);
113103 if (!tty_hung_up_p(filp))
113104- port->count++;
113105+ atomic_inc(&port->count);
113106 port->blocked_open--;
113107 spin_unlock_irqrestore(&port->lock, flags);
113108
113109 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
113110- __FILE__, __LINE__, tty->driver->name, port->count);
113111+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
113112
113113 if (!retval)
113114 port->flags |= ASYNC_NORMAL_ACTIVE;
113115@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
113116
113117 /* ++ is not atomic, so this should be protected - Jean II */
113118 spin_lock_irqsave(&self->port.lock, flags);
113119- self->port.count++;
113120+ atomic_inc(&self->port.count);
113121 spin_unlock_irqrestore(&self->port.lock, flags);
113122 tty_port_tty_set(&self->port, tty);
113123
113124 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
113125- self->line, self->port.count);
113126+ self->line, atomic_read(&self->port.count));
113127
113128 /* Not really used by us, but lets do it anyway */
113129 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
113130@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
113131 tty_kref_put(port->tty);
113132 }
113133 port->tty = NULL;
113134- port->count = 0;
113135+ atomic_set(&port->count, 0);
113136 spin_unlock_irqrestore(&port->lock, flags);
113137
113138 wake_up_interruptible(&port->open_wait);
113139@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
113140 seq_putc(m, '\n');
113141
113142 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
113143- seq_printf(m, "Open count: %d\n", self->port.count);
113144+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
113145 seq_printf(m, "Max data size: %d\n", self->max_data_size);
113146 seq_printf(m, "Max header size: %d\n", self->max_header_size);
113147
113148diff --git a/net/irda/irproc.c b/net/irda/irproc.c
113149index b9ac598..f88cc56 100644
113150--- a/net/irda/irproc.c
113151+++ b/net/irda/irproc.c
113152@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
113153 {
113154 int i;
113155
113156- proc_irda = proc_mkdir("irda", init_net.proc_net);
113157+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
113158 if (proc_irda == NULL)
113159 return;
113160
113161diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
113162index 53d9311..cbaf99f 100644
113163--- a/net/iucv/af_iucv.c
113164+++ b/net/iucv/af_iucv.c
113165@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
113166 {
113167 char name[12];
113168
113169- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
113170+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
113171 while (__iucv_get_sock_by_name(name)) {
113172 sprintf(name, "%08x",
113173- atomic_inc_return(&iucv_sk_list.autobind_name));
113174+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
113175 }
113176 memcpy(iucv->src_name, name, 8);
113177 }
113178diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
113179index 2a6a1fd..6c112b0 100644
113180--- a/net/iucv/iucv.c
113181+++ b/net/iucv/iucv.c
113182@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
113183 return NOTIFY_OK;
113184 }
113185
113186-static struct notifier_block __refdata iucv_cpu_notifier = {
113187+static struct notifier_block iucv_cpu_notifier = {
113188 .notifier_call = iucv_cpu_notify,
113189 };
113190
113191diff --git a/net/key/af_key.c b/net/key/af_key.c
113192index f8ac939..1e189bf 100644
113193--- a/net/key/af_key.c
113194+++ b/net/key/af_key.c
113195@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
113196 static u32 get_acqseq(void)
113197 {
113198 u32 res;
113199- static atomic_t acqseq;
113200+ static atomic_unchecked_t acqseq;
113201
113202 do {
113203- res = atomic_inc_return(&acqseq);
113204+ res = atomic_inc_return_unchecked(&acqseq);
113205 } while (!res);
113206 return res;
113207 }
113208diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
113209index 781b3a2..73a7434 100644
113210--- a/net/l2tp/l2tp_eth.c
113211+++ b/net/l2tp/l2tp_eth.c
113212@@ -42,12 +42,12 @@ struct l2tp_eth {
113213 struct sock *tunnel_sock;
113214 struct l2tp_session *session;
113215 struct list_head list;
113216- atomic_long_t tx_bytes;
113217- atomic_long_t tx_packets;
113218- atomic_long_t tx_dropped;
113219- atomic_long_t rx_bytes;
113220- atomic_long_t rx_packets;
113221- atomic_long_t rx_errors;
113222+ atomic_long_unchecked_t tx_bytes;
113223+ atomic_long_unchecked_t tx_packets;
113224+ atomic_long_unchecked_t tx_dropped;
113225+ atomic_long_unchecked_t rx_bytes;
113226+ atomic_long_unchecked_t rx_packets;
113227+ atomic_long_unchecked_t rx_errors;
113228 };
113229
113230 /* via l2tp_session_priv() */
113231@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
113232 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
113233
113234 if (likely(ret == NET_XMIT_SUCCESS)) {
113235- atomic_long_add(len, &priv->tx_bytes);
113236- atomic_long_inc(&priv->tx_packets);
113237+ atomic_long_add_unchecked(len, &priv->tx_bytes);
113238+ atomic_long_inc_unchecked(&priv->tx_packets);
113239 } else {
113240- atomic_long_inc(&priv->tx_dropped);
113241+ atomic_long_inc_unchecked(&priv->tx_dropped);
113242 }
113243 return NETDEV_TX_OK;
113244 }
113245@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
113246 {
113247 struct l2tp_eth *priv = netdev_priv(dev);
113248
113249- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
113250- stats->tx_packets = atomic_long_read(&priv->tx_packets);
113251- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
113252- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
113253- stats->rx_packets = atomic_long_read(&priv->rx_packets);
113254- stats->rx_errors = atomic_long_read(&priv->rx_errors);
113255+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
113256+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
113257+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
113258+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
113259+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
113260+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
113261 return stats;
113262 }
113263
113264@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
113265 nf_reset(skb);
113266
113267 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
113268- atomic_long_inc(&priv->rx_packets);
113269- atomic_long_add(data_len, &priv->rx_bytes);
113270+ atomic_long_inc_unchecked(&priv->rx_packets);
113271+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
113272 } else {
113273- atomic_long_inc(&priv->rx_errors);
113274+ atomic_long_inc_unchecked(&priv->rx_errors);
113275 }
113276 return;
113277
113278 error:
113279- atomic_long_inc(&priv->rx_errors);
113280+ atomic_long_inc_unchecked(&priv->rx_errors);
113281 kfree_skb(skb);
113282 }
113283
113284diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
113285index 05dfc8aa..df6cfd7 100644
113286--- a/net/l2tp/l2tp_ip.c
113287+++ b/net/l2tp/l2tp_ip.c
113288@@ -608,7 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
113289 .ops = &l2tp_ip_ops,
113290 };
113291
113292-static struct net_protocol l2tp_ip_protocol __read_mostly = {
113293+static const struct net_protocol l2tp_ip_protocol = {
113294 .handler = l2tp_ip_recv,
113295 .netns_ok = 1,
113296 };
113297diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
113298index 8611f1b..bc60a2d 100644
113299--- a/net/l2tp/l2tp_ip6.c
113300+++ b/net/l2tp/l2tp_ip6.c
113301@@ -757,7 +757,7 @@ static struct inet_protosw l2tp_ip6_protosw = {
113302 .ops = &l2tp_ip6_ops,
113303 };
113304
113305-static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
113306+static const struct inet6_protocol l2tp_ip6_protocol = {
113307 .handler = l2tp_ip6_recv,
113308 };
113309
113310diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
113311index 1a3c7e0..80f8b0c 100644
113312--- a/net/llc/llc_proc.c
113313+++ b/net/llc/llc_proc.c
113314@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
113315 int rc = -ENOMEM;
113316 struct proc_dir_entry *p;
113317
113318- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
113319+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
113320 if (!llc_proc_dir)
113321 goto out;
113322
113323diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
113324index dd4ff36..3462997 100644
113325--- a/net/mac80211/cfg.c
113326+++ b/net/mac80211/cfg.c
113327@@ -581,7 +581,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
113328 ret = ieee80211_vif_use_channel(sdata, chandef,
113329 IEEE80211_CHANCTX_EXCLUSIVE);
113330 }
113331- } else if (local->open_count == local->monitors) {
113332+ } else if (local_read(&local->open_count) == local->monitors) {
113333 local->_oper_chandef = *chandef;
113334 ieee80211_hw_config(local, 0);
113335 }
113336@@ -3468,7 +3468,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
113337 else
113338 local->probe_req_reg--;
113339
113340- if (!local->open_count)
113341+ if (!local_read(&local->open_count))
113342 break;
113343
113344 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
113345@@ -3603,8 +3603,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
113346 if (chanctx_conf) {
113347 *chandef = sdata->vif.bss_conf.chandef;
113348 ret = 0;
113349- } else if (local->open_count > 0 &&
113350- local->open_count == local->monitors &&
113351+ } else if (local_read(&local->open_count) > 0 &&
113352+ local_read(&local->open_count) == local->monitors &&
113353 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
113354 if (local->use_chanctx)
113355 *chandef = local->monitor_chandef;
113356diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
113357index 8d53d65..a4ac794 100644
113358--- a/net/mac80211/ieee80211_i.h
113359+++ b/net/mac80211/ieee80211_i.h
113360@@ -29,6 +29,7 @@
113361 #include <net/ieee80211_radiotap.h>
113362 #include <net/cfg80211.h>
113363 #include <net/mac80211.h>
113364+#include <asm/local.h>
113365 #include "key.h"
113366 #include "sta_info.h"
113367 #include "debug.h"
113368@@ -1126,7 +1127,7 @@ struct ieee80211_local {
113369 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
113370 spinlock_t queue_stop_reason_lock;
113371
113372- int open_count;
113373+ local_t open_count;
113374 int monitors, cooked_mntrs;
113375 /* number of interfaces with corresponding FIF_ flags */
113376 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
113377diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
113378index 81a2751..c06a026 100644
113379--- a/net/mac80211/iface.c
113380+++ b/net/mac80211/iface.c
113381@@ -544,7 +544,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113382 break;
113383 }
113384
113385- if (local->open_count == 0) {
113386+ if (local_read(&local->open_count) == 0) {
113387 res = drv_start(local);
113388 if (res)
113389 goto err_del_bss;
113390@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113391 res = drv_add_interface(local, sdata);
113392 if (res)
113393 goto err_stop;
113394- } else if (local->monitors == 0 && local->open_count == 0) {
113395+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
113396 res = ieee80211_add_virtual_monitor(local);
113397 if (res)
113398 goto err_stop;
113399@@ -701,7 +701,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113400 atomic_inc(&local->iff_promiscs);
113401
113402 if (coming_up)
113403- local->open_count++;
113404+ local_inc(&local->open_count);
113405
113406 if (hw_reconf_flags)
113407 ieee80211_hw_config(local, hw_reconf_flags);
113408@@ -739,7 +739,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113409 err_del_interface:
113410 drv_remove_interface(local, sdata);
113411 err_stop:
113412- if (!local->open_count)
113413+ if (!local_read(&local->open_count))
113414 drv_stop(local);
113415 err_del_bss:
113416 sdata->bss = NULL;
113417@@ -907,7 +907,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113418 }
113419
113420 if (going_down)
113421- local->open_count--;
113422+ local_dec(&local->open_count);
113423
113424 switch (sdata->vif.type) {
113425 case NL80211_IFTYPE_AP_VLAN:
113426@@ -969,7 +969,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113427 }
113428 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
113429
113430- if (local->open_count == 0)
113431+ if (local_read(&local->open_count) == 0)
113432 ieee80211_clear_tx_pending(local);
113433
113434 /*
113435@@ -1012,7 +1012,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113436 if (cancel_scan)
113437 flush_delayed_work(&local->scan_work);
113438
113439- if (local->open_count == 0) {
113440+ if (local_read(&local->open_count) == 0) {
113441 ieee80211_stop_device(local);
113442
113443 /* no reconfiguring after stop! */
113444@@ -1023,7 +1023,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113445 ieee80211_configure_filter(local);
113446 ieee80211_hw_config(local, hw_reconf_flags);
113447
113448- if (local->monitors == local->open_count)
113449+ if (local->monitors == local_read(&local->open_count))
113450 ieee80211_add_virtual_monitor(local);
113451 }
113452
113453diff --git a/net/mac80211/main.c b/net/mac80211/main.c
113454index 5e09d35..e2fdbe2 100644
113455--- a/net/mac80211/main.c
113456+++ b/net/mac80211/main.c
113457@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
113458 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
113459 IEEE80211_CONF_CHANGE_POWER);
113460
113461- if (changed && local->open_count) {
113462+ if (changed && local_read(&local->open_count)) {
113463 ret = drv_config(local, changed);
113464 /*
113465 * Goal:
113466diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
113467index ca405b6..6cc8bee 100644
113468--- a/net/mac80211/pm.c
113469+++ b/net/mac80211/pm.c
113470@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
113471 struct ieee80211_sub_if_data *sdata;
113472 struct sta_info *sta;
113473
113474- if (!local->open_count)
113475+ if (!local_read(&local->open_count))
113476 goto suspend;
113477
113478 ieee80211_scan_cancel(local);
113479@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
113480 cancel_work_sync(&local->dynamic_ps_enable_work);
113481 del_timer_sync(&local->dynamic_ps_timer);
113482
113483- local->wowlan = wowlan && local->open_count;
113484+ local->wowlan = wowlan && local_read(&local->open_count);
113485 if (local->wowlan) {
113486 int err = drv_suspend(local, wowlan);
113487 if (err < 0) {
113488@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
113489 WARN_ON(!list_empty(&local->chanctx_list));
113490
113491 /* stop hardware - this must stop RX */
113492- if (local->open_count)
113493+ if (local_read(&local->open_count))
113494 ieee80211_stop_device(local);
113495
113496 suspend:
113497diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
113498index d53355b..21f583a 100644
113499--- a/net/mac80211/rate.c
113500+++ b/net/mac80211/rate.c
113501@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
113502
113503 ASSERT_RTNL();
113504
113505- if (local->open_count)
113506+ if (local_read(&local->open_count))
113507 return -EBUSY;
113508
113509 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
113510diff --git a/net/mac80211/util.c b/net/mac80211/util.c
113511index 747bdcf..eb2b981 100644
113512--- a/net/mac80211/util.c
113513+++ b/net/mac80211/util.c
113514@@ -1741,7 +1741,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113515 bool sched_scan_stopped = false;
113516
113517 /* nothing to do if HW shouldn't run */
113518- if (!local->open_count)
113519+ if (!local_read(&local->open_count))
113520 goto wake_up;
113521
113522 #ifdef CONFIG_PM
113523@@ -1993,7 +1993,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113524 local->in_reconfig = false;
113525 barrier();
113526
113527- if (local->monitors == local->open_count && local->monitors > 0)
113528+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
113529 ieee80211_add_virtual_monitor(local);
113530
113531 /*
113532@@ -2048,7 +2048,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113533 * If this is for hw restart things are still running.
113534 * We may want to change that later, however.
113535 */
113536- if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
113537+ if (local_read(&local->open_count) && (!local->suspended || reconfig_due_to_wowlan))
113538 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
113539
113540 if (!local->suspended)
113541@@ -2072,7 +2072,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113542 flush_delayed_work(&local->scan_work);
113543 }
113544
113545- if (local->open_count && !reconfig_due_to_wowlan)
113546+ if (local_read(&local->open_count) && !reconfig_due_to_wowlan)
113547 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
113548
113549 list_for_each_entry(sdata, &local->interfaces, list) {
113550diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
113551index b02660f..c0f791c 100644
113552--- a/net/netfilter/Kconfig
113553+++ b/net/netfilter/Kconfig
113554@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
113555
113556 To compile it as a module, choose M here. If unsure, say N.
113557
113558+config NETFILTER_XT_MATCH_GRADM
113559+ tristate '"gradm" match support'
113560+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
113561+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
113562+ ---help---
113563+ The gradm match allows to match on grsecurity RBAC being enabled.
113564+ It is useful when iptables rules are applied early on bootup to
113565+ prevent connections to the machine (except from a trusted host)
113566+ while the RBAC system is disabled.
113567+
113568 config NETFILTER_XT_MATCH_HASHLIMIT
113569 tristate '"hashlimit" match support'
113570 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
113571diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
113572index 89f73a9..e4e5bd9 100644
113573--- a/net/netfilter/Makefile
113574+++ b/net/netfilter/Makefile
113575@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
113576 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
113577 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
113578 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
113579+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
113580 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
113581 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
113582 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
113583diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
113584index d259da3..6a32b2c 100644
113585--- a/net/netfilter/ipset/ip_set_core.c
113586+++ b/net/netfilter/ipset/ip_set_core.c
113587@@ -1952,7 +1952,7 @@ done:
113588 return ret;
113589 }
113590
113591-static struct nf_sockopt_ops so_set __read_mostly = {
113592+static struct nf_sockopt_ops so_set = {
113593 .pf = PF_INET,
113594 .get_optmin = SO_IP_SET,
113595 .get_optmax = SO_IP_SET + 1,
113596diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
113597index b0f7b62..0541842 100644
113598--- a/net/netfilter/ipvs/ip_vs_conn.c
113599+++ b/net/netfilter/ipvs/ip_vs_conn.c
113600@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
113601 /* Increase the refcnt counter of the dest */
113602 ip_vs_dest_hold(dest);
113603
113604- conn_flags = atomic_read(&dest->conn_flags);
113605+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
113606 if (cp->protocol != IPPROTO_UDP)
113607 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
113608 flags = cp->flags;
113609@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
113610
113611 cp->control = NULL;
113612 atomic_set(&cp->n_control, 0);
113613- atomic_set(&cp->in_pkts, 0);
113614+ atomic_set_unchecked(&cp->in_pkts, 0);
113615
113616 cp->packet_xmit = NULL;
113617 cp->app = NULL;
113618@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
113619
113620 /* Don't drop the entry if its number of incoming packets is not
113621 located in [0, 8] */
113622- i = atomic_read(&cp->in_pkts);
113623+ i = atomic_read_unchecked(&cp->in_pkts);
113624 if (i > 8 || i < 0) return 0;
113625
113626 if (!todrop_rate[i]) return 0;
113627diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
113628index b87ca32..76c7799 100644
113629--- a/net/netfilter/ipvs/ip_vs_core.c
113630+++ b/net/netfilter/ipvs/ip_vs_core.c
113631@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
113632 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
113633 /* do not touch skb anymore */
113634
113635- atomic_inc(&cp->in_pkts);
113636+ atomic_inc_unchecked(&cp->in_pkts);
113637 ip_vs_conn_put(cp);
113638 return ret;
113639 }
113640@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
113641 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
113642 pkts = sysctl_sync_threshold(ipvs);
113643 else
113644- pkts = atomic_add_return(1, &cp->in_pkts);
113645+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
113646
113647 if (ipvs->sync_state & IP_VS_STATE_MASTER)
113648 ip_vs_sync_conn(net, cp, pkts);
113649diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
113650index ed99448..3ba6cad 100644
113651--- a/net/netfilter/ipvs/ip_vs_ctl.c
113652+++ b/net/netfilter/ipvs/ip_vs_ctl.c
113653@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
113654 */
113655 ip_vs_rs_hash(ipvs, dest);
113656 }
113657- atomic_set(&dest->conn_flags, conn_flags);
113658+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
113659
113660 /* bind the service */
113661 old_svc = rcu_dereference_protected(dest->svc, 1);
113662@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
113663 * align with netns init in ip_vs_control_net_init()
113664 */
113665
113666-static struct ctl_table vs_vars[] = {
113667+static ctl_table_no_const vs_vars[] __read_only = {
113668 {
113669 .procname = "amemthresh",
113670 .maxlen = sizeof(int),
113671@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
113672 " %-7s %-6d %-10d %-10d\n",
113673 &dest->addr.in6,
113674 ntohs(dest->port),
113675- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
113676+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
113677 atomic_read(&dest->weight),
113678 atomic_read(&dest->activeconns),
113679 atomic_read(&dest->inactconns));
113680@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
113681 "%-7s %-6d %-10d %-10d\n",
113682 ntohl(dest->addr.ip),
113683 ntohs(dest->port),
113684- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
113685+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
113686 atomic_read(&dest->weight),
113687 atomic_read(&dest->activeconns),
113688 atomic_read(&dest->inactconns));
113689@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
113690
113691 entry.addr = dest->addr.ip;
113692 entry.port = dest->port;
113693- entry.conn_flags = atomic_read(&dest->conn_flags);
113694+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
113695 entry.weight = atomic_read(&dest->weight);
113696 entry.u_threshold = dest->u_threshold;
113697 entry.l_threshold = dest->l_threshold;
113698@@ -3040,7 +3040,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
113699 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
113700 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
113701 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
113702- (atomic_read(&dest->conn_flags) &
113703+ (atomic_read_unchecked(&dest->conn_flags) &
113704 IP_VS_CONN_F_FWD_MASK)) ||
113705 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
113706 atomic_read(&dest->weight)) ||
113707@@ -3675,7 +3675,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
113708 {
113709 int idx;
113710 struct netns_ipvs *ipvs = net_ipvs(net);
113711- struct ctl_table *tbl;
113712+ ctl_table_no_const *tbl;
113713
113714 atomic_set(&ipvs->dropentry, 0);
113715 spin_lock_init(&ipvs->dropentry_lock);
113716diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
113717index 127f140..553d652 100644
113718--- a/net/netfilter/ipvs/ip_vs_lblc.c
113719+++ b/net/netfilter/ipvs/ip_vs_lblc.c
113720@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
113721 * IPVS LBLC sysctl table
113722 */
113723 #ifdef CONFIG_SYSCTL
113724-static struct ctl_table vs_vars_table[] = {
113725+static ctl_table_no_const vs_vars_table[] __read_only = {
113726 {
113727 .procname = "lblc_expiration",
113728 .data = NULL,
113729diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
113730index 2229d2d..b32b785 100644
113731--- a/net/netfilter/ipvs/ip_vs_lblcr.c
113732+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
113733@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
113734 * IPVS LBLCR sysctl table
113735 */
113736
113737-static struct ctl_table vs_vars_table[] = {
113738+static ctl_table_no_const vs_vars_table[] __read_only = {
113739 {
113740 .procname = "lblcr_expiration",
113741 .data = NULL,
113742diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
113743index d93ceeb..4556144 100644
113744--- a/net/netfilter/ipvs/ip_vs_sync.c
113745+++ b/net/netfilter/ipvs/ip_vs_sync.c
113746@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
113747 cp = cp->control;
113748 if (cp) {
113749 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
113750- pkts = atomic_add_return(1, &cp->in_pkts);
113751+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
113752 else
113753 pkts = sysctl_sync_threshold(ipvs);
113754 ip_vs_sync_conn(net, cp->control, pkts);
113755@@ -771,7 +771,7 @@ control:
113756 if (!cp)
113757 return;
113758 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
113759- pkts = atomic_add_return(1, &cp->in_pkts);
113760+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
113761 else
113762 pkts = sysctl_sync_threshold(ipvs);
113763 goto sloop;
113764@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
113765
113766 if (opt)
113767 memcpy(&cp->in_seq, opt, sizeof(*opt));
113768- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
113769+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
113770 cp->state = state;
113771 cp->old_state = cp->state;
113772 /*
113773diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
113774index 3aedbda..6a63567 100644
113775--- a/net/netfilter/ipvs/ip_vs_xmit.c
113776+++ b/net/netfilter/ipvs/ip_vs_xmit.c
113777@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
113778 else
113779 rc = NF_ACCEPT;
113780 /* do not touch skb anymore */
113781- atomic_inc(&cp->in_pkts);
113782+ atomic_inc_unchecked(&cp->in_pkts);
113783 goto out;
113784 }
113785
113786@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
113787 else
113788 rc = NF_ACCEPT;
113789 /* do not touch skb anymore */
113790- atomic_inc(&cp->in_pkts);
113791+ atomic_inc_unchecked(&cp->in_pkts);
113792 goto out;
113793 }
113794
113795diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
113796index a4b5e2a..13b1de3 100644
113797--- a/net/netfilter/nf_conntrack_acct.c
113798+++ b/net/netfilter/nf_conntrack_acct.c
113799@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
113800 #ifdef CONFIG_SYSCTL
113801 static int nf_conntrack_acct_init_sysctl(struct net *net)
113802 {
113803- struct ctl_table *table;
113804+ ctl_table_no_const *table;
113805
113806 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
113807 GFP_KERNEL);
113808diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
113809index 13fad86..18c984c 100644
113810--- a/net/netfilter/nf_conntrack_core.c
113811+++ b/net/netfilter/nf_conntrack_core.c
113812@@ -1733,6 +1733,10 @@ void nf_conntrack_init_end(void)
113813 #define DYING_NULLS_VAL ((1<<30)+1)
113814 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
113815
113816+#ifdef CONFIG_GRKERNSEC_HIDESYM
113817+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
113818+#endif
113819+
113820 int nf_conntrack_init_net(struct net *net)
113821 {
113822 int ret = -ENOMEM;
113823@@ -1758,7 +1762,11 @@ int nf_conntrack_init_net(struct net *net)
113824 if (!net->ct.stat)
113825 goto err_pcpu_lists;
113826
113827+#ifdef CONFIG_GRKERNSEC_HIDESYM
113828+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
113829+#else
113830 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
113831+#endif
113832 if (!net->ct.slabname)
113833 goto err_slabname;
113834
113835diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
113836index 4e78c57..ec8fb74 100644
113837--- a/net/netfilter/nf_conntrack_ecache.c
113838+++ b/net/netfilter/nf_conntrack_ecache.c
113839@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
113840 #ifdef CONFIG_SYSCTL
113841 static int nf_conntrack_event_init_sysctl(struct net *net)
113842 {
113843- struct ctl_table *table;
113844+ ctl_table_no_const *table;
113845
113846 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
113847 GFP_KERNEL);
113848diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
113849index bd9d315..989947e 100644
113850--- a/net/netfilter/nf_conntrack_helper.c
113851+++ b/net/netfilter/nf_conntrack_helper.c
113852@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
113853
113854 static int nf_conntrack_helper_init_sysctl(struct net *net)
113855 {
113856- struct ctl_table *table;
113857+ ctl_table_no_const *table;
113858
113859 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
113860 GFP_KERNEL);
113861diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
113862index b65d586..beec902 100644
113863--- a/net/netfilter/nf_conntrack_proto.c
113864+++ b/net/netfilter/nf_conntrack_proto.c
113865@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
113866
113867 static void
113868 nf_ct_unregister_sysctl(struct ctl_table_header **header,
113869- struct ctl_table **table,
113870+ ctl_table_no_const **table,
113871 unsigned int users)
113872 {
113873 if (users > 0)
113874diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
113875index fc823fa..8311af3 100644
113876--- a/net/netfilter/nf_conntrack_standalone.c
113877+++ b/net/netfilter/nf_conntrack_standalone.c
113878@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
113879
113880 static int nf_conntrack_standalone_init_sysctl(struct net *net)
113881 {
113882- struct ctl_table *table;
113883+ ctl_table_no_const *table;
113884
113885 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
113886 GFP_KERNEL);
113887diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
113888index 7a394df..bd91a8a 100644
113889--- a/net/netfilter/nf_conntrack_timestamp.c
113890+++ b/net/netfilter/nf_conntrack_timestamp.c
113891@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
113892 #ifdef CONFIG_SYSCTL
113893 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
113894 {
113895- struct ctl_table *table;
113896+ ctl_table_no_const *table;
113897
113898 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
113899 GFP_KERNEL);
113900diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
113901index 675d12c..b36e825 100644
113902--- a/net/netfilter/nf_log.c
113903+++ b/net/netfilter/nf_log.c
113904@@ -386,7 +386,7 @@ static const struct file_operations nflog_file_ops = {
113905
113906 #ifdef CONFIG_SYSCTL
113907 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
113908-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
113909+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
113910
113911 static int nf_log_proc_dostring(struct ctl_table *table, int write,
113912 void __user *buffer, size_t *lenp, loff_t *ppos)
113913@@ -417,13 +417,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
113914 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
113915 mutex_unlock(&nf_log_mutex);
113916 } else {
113917+ ctl_table_no_const nf_log_table = *table;
113918+
113919 mutex_lock(&nf_log_mutex);
113920 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
113921 if (!logger)
113922- table->data = "NONE";
113923+ nf_log_table.data = "NONE";
113924 else
113925- table->data = logger->name;
113926- r = proc_dostring(table, write, buffer, lenp, ppos);
113927+ nf_log_table.data = logger->name;
113928+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
113929 mutex_unlock(&nf_log_mutex);
113930 }
113931
113932diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
113933index c68c1e5..8b5d670 100644
113934--- a/net/netfilter/nf_sockopt.c
113935+++ b/net/netfilter/nf_sockopt.c
113936@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
113937 }
113938 }
113939
113940- list_add(&reg->list, &nf_sockopts);
113941+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
113942 out:
113943 mutex_unlock(&nf_sockopt_mutex);
113944 return ret;
113945@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
113946 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
113947 {
113948 mutex_lock(&nf_sockopt_mutex);
113949- list_del(&reg->list);
113950+ pax_list_del((struct list_head *)&reg->list);
113951 mutex_unlock(&nf_sockopt_mutex);
113952 }
113953 EXPORT_SYMBOL(nf_unregister_sockopt);
113954diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
113955index 11d85b3..7fcc420 100644
113956--- a/net/netfilter/nfnetlink_log.c
113957+++ b/net/netfilter/nfnetlink_log.c
113958@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
113959 struct nfnl_log_net {
113960 spinlock_t instances_lock;
113961 struct hlist_head instance_table[INSTANCE_BUCKETS];
113962- atomic_t global_seq;
113963+ atomic_unchecked_t global_seq;
113964 };
113965
113966 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
113967@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
113968 /* global sequence number */
113969 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
113970 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
113971- htonl(atomic_inc_return(&log->global_seq))))
113972+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
113973 goto nla_put_failure;
113974
113975 if (data_len) {
113976diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
113977index 65f3e2b..2e9d6a0 100644
113978--- a/net/netfilter/nft_compat.c
113979+++ b/net/netfilter/nft_compat.c
113980@@ -317,14 +317,7 @@ static void nft_match_eval(const struct nft_expr *expr,
113981 return;
113982 }
113983
113984- switch(ret) {
113985- case true:
113986- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
113987- break;
113988- case false:
113989- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
113990- break;
113991- }
113992+ data[NFT_REG_VERDICT].verdict = ret ? NFT_CONTINUE : NFT_BREAK;
113993 }
113994
113995 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
113996diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
113997new file mode 100644
113998index 0000000..c566332
113999--- /dev/null
114000+++ b/net/netfilter/xt_gradm.c
114001@@ -0,0 +1,51 @@
114002+/*
114003+ * gradm match for netfilter
114004